import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}
def rename_keys(s_dict) -> dict:
    """Rename T5X checkpoint keys so they match the HF SwitchTransformers layout."""
    # 1. in HF T5, we have block.{x}.layer.{y}. which hides the number of layers per block
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        encoder_or_decoder = r"(encoder|decoder)\/"
        if re.match(encoder_or_decoder, key):
            groups = re.match(encoder_or_decoder, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert tensor into one
    # entry per expert (the "experts/expert_{idx}/" target follows the HF parameter naming)
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)
    return s_dict
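# For illustration: a hypothetical T5X key "encoder/layers_3/mlp/wi/kernel" is rewritten to
# "encoder/block/3/layer/1/mlp/wi/kernel" by the rules above (layers_3 -> block/3/layer,
# then /mlp/ -> /1/mlp/ on the encoder branch).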
GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts) -> SwitchTransformersConfig:
    """Build a SwitchTransformersConfig from a T5X gin config file."""
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Load a T5X SwitchTransformers checkpoint and save it as a PyTorch model."""
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the T5X checkpoint of the pre-trained SwitchTransformers model.",
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_name` has to be passed.",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of the SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
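    # A sketch of how this script might be invoked (script name and paths are placeholders):
    #   python convert_switch_transformers_checkpoint.py \
    #       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
    #       --gin_file /path/to/operative_config.gin \
    #       --pytorch_dump_folder_path ./switch-converted \
    #       --num_experts 8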
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
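# A minimal usage sketch (the checkpoint name comes from the maps above; the question/passage
# strings are made up for illustration):
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(questions="Who wrote Hamlet?", titles="Hamlet", texts="Hamlet is a tragedy by William Shakespeare.")
#   # encoded["input_ids"] has shape (n_passages, sequence_length); feed the DPRReader outputs
#   # back into tokenizer.decode_best_spans(...) to rank answer spans.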
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it is available for the transformers-cli."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
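# Once registered by `register_subcommand`, training can be launched from the CLI, e.g.
# (paths are placeholders):
#   transformers-cli train --train_data ./data/train.csv --output ./trained_model --task text_classification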
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
END_COMMON = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key(k, patterns):
    """Apply the (tf -> hf) rename patterns to a single state-dict key."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    """Read every variable of a TF checkpoint into a {name: numpy array} dict."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path, save_dir, config_update):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
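    # A sketch of a typical invocation (script name and checkpoint path are placeholders):
    #   python convert_bigbird_pegasus_tf_to_pytorch.py --tf_ckpt_path /path/to/tf_ckpt --save_dir ./bigbird-pegasus-converted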
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `hf_pointer` attributes along `key` and copy `value` into the target parameter."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
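    # A sketch of a typical invocation (script name and paths are placeholders):
    #   python convert_unispeech_sat_checkpoint.py --checkpoint_path /path/to/unispeech_sat.pt \
    #       --pytorch_dump_folder_path ./unispeech-sat-converted --config_path ./config.json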
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    # EfficientNet blocks are named block1a, block2a, ...; collect the distinct block prefixes
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # classification head ("predictions" is the Keras layer name for the EfficientNet top)
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak the original TF model's weights into the HF EfficientNet structure."""
    # Load original model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
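    # A sketch of a typical invocation (script name is a placeholder):
    #   python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path ./hf_model --save_model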
import numpy as np


class Cell:
    """A cell of the grid world, holding its position, parent link and A* scores (g, h, f)."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    """The external world: an M*N grid stored as a numpy array."""

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """Return the (up to 8) in-bounds neighbours of `cell`."""
        neighbour_coords = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_coords:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """A* search from `start` to `goal` on `world`; returns the path as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g

            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
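# Note: the heuristic above is the *squared* Euclidean distance, which can overestimate the
# true remaining cost on an 8-connected grid. An admissible alternative would be Chebyshev
# distance: n.h = max(abs(x2 - x1), abs(y2 - y1)).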
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.prod(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1_000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
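

# Standalone sketch (not the SortishSampler implementation from utils) of the
# padding-reduction idea that test_sortish_sampler_reduces_padding asserts:
# batching indices of similar length lets each batch pad only to its own
# maximum instead of the global maximum.
def _sortish_batches_sketch(lengths, batch_size):
    order = sorted(range(len(lengths)), key=lambda i: lengths[i])
    return [order[i : i + batch_size] for i in range(0, len(order), batch_size)]


# e.g. _sortish_batches_sketch([5, 50, 7, 48], 2) -> [[0, 2], [3, 1]]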
| 105 | 1 |
def binomial_coefficient(n, r):
    # Compute nCr with a Pascal's-triangle row update: O(n * r) time, O(r) space.
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
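
# Quick sanity check against the standard library (math.comb needs Python 3.8+):
# C(10, 5) = 252.
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252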
| 195 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    r"""
    Wraps an EnCodec feature extractor and a T5 tokenizer into a single processor.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """
        Forwards the `audio` argument to the feature extractor and the `text` argument to the tokenizer.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """
        Decodes batched audio outputs from the model, or forwards everything to the tokenizer's batch_decode.
        """
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        Forwards all arguments to the tokenizer's decode method.
        """
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        """
        Strips any padding from the generated audio, returning a list of numpy audio arrays.
        """
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
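

# Usage sketch. The checkpoint name below is an assumption (any MusicGen-style
# checkpoint that pairs an EncodecFeatureExtractor with a T5 tokenizer works):
#
#     from transformers import AutoProcessor
#
#     processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
#     inputs = processor(text=["80s pop track with heavy drums"], padding=True, return_tensors="pt")
#     # also pass `audio=...` and `sampling_rate=...` to get `input_values`
#     # (and `padding_mask`) from the feature extractor in the same dict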
| 90 | 0 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
])
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )
    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is no hf-internal-testing tiny model for either GLPN or DPT")
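

# Minimal usage sketch mirroring test_large_model_pt (same public checkpoint):
#
#     from transformers import pipeline
#
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     result["depth"].save("depth.png")       # PIL.Image of the depth map
#     print(result["predicted_depth"].shape)  # raw torch.Tensor of depths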
| 203 | """simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
'input_ids': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
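

# Sketch of rebuilding a tokenizer from the fixture files that setUp writes
# (file names here are hypothetical; DebertaTokenizer uses a GPT-2-style BPE
# with a vocab json plus a merges file):
#
#     tok = DebertaTokenizer(vocab_file="vocab.json", merges_file="merges.txt", unk_token="[UNK]")
#     assert tok.tokenize("lower newer") == ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]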
| 203 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)

        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 286 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 286 | 1 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Function to reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Function to compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Function to compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )

    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project the dataset onto its first `dimensions` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        # `dimensions` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, centered_data)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int) -> np.ndarray:
    """Project the dataset onto its `dimensions` most discriminant directions."""
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
        assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
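
# Worked example (runs with the functions above): two perfectly correlated
# rows carry all their variance along one direction, so a single principal
# component suffices.
#
#     features = np.array([[1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0]])
#     projected = principal_component_analysis(features, dimensions=1)
#     # projected has shape (1, 4): one component, four samples.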
| 358 |
def fibonacci(n: int) -> int:
    """Iteratively compute the n-th Fibonacci number of the sequence [0, 1, 1, 2, ...]."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1_000) -> int:
    """Project Euler 25: index of the first Fibonacci number with `n` digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
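
# Quick sanity check (run in a REPL): the first Fibonacci number with three
# digits is F(12) = 144, so:
#
#     >>> fibonacci_digits_index(3)
#     12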
| 15 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    r"""
    A pipeline for image super-resolution using latent diffusion.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
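

# Usage sketch; the checkpoint id is the commonly published one for this
# pipeline and should be treated as an assumption here:
#
#     from diffusers import LDMSuperResolutionPipeline
#     from PIL import Image
#
#     pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#     low_res = Image.open("low_res.png").convert("RGB").resize((128, 128))
#     upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
#     upscaled.save("upscaled.png")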
| 104 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 331 | 0 |
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 360 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")

    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)

    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 239 | 0 |
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case" , _lowercase )
def _SCREAMING_SNAKE_CASE ( _lowercase : Dict , _lowercase : Tuple ) ->Union[str, Any]:
'''simple docstring'''
a : List[str] = os.path.join(_lowercase , "test_file.py" )
with open(_lowercase , "w" ) as _tmp_file:
_tmp_file.write(_lowercase )
a : str = get_imports(_lowercase )
assert parsed_imports == ["os"]
| 105 |
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)
    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1)
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
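# A minimal shape sketch (illustrative only; the small dimensions below are
# assumptions, not the values of any released checkpoint):
#     model = UnCLIPTextProjModel(clip_extra_context_tokens=4, clip_embeddings_dim=32,
#                                 time_embed_dim=64, cross_attention_dim=16)
#     tokens, time_emb = model(image_embeddings=torch.randn(2, 32),
#                              prompt_embeds=torch.randn(2, 32),
#                              text_encoder_hidden_states=torch.randn(2, 77, 32),
#                              do_classifier_free_guidance=False)
#     # tokens: (2, 4 + 77, 16) and time_emb: (2, 64)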
| 105 | 1 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        image_prompt = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 113 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class LoggingTestCase(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self):
        level_origin = logging.get_verbosity()
        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + '\n')
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, '')
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + '\n')
        # restore to the original level
        logging.set_verbosity(level_origin)
@mockenv(TRANSFORMERS_VERBOSITY='error')
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        env_level_str = os.getenv('TRANSFORMERS_VERBOSITY', None)
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level, current_level, F"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}")
        # restore to the original level
        os.environ['TRANSFORMERS_VERBOSITY'] = ''
        transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error')
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart')
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error', cl.out)
# no need to restore as nothing was changed
    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1'):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, '')
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=''):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + '\n')
def test_set_progress_bar_enabled():
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 113 | 1 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 203 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range)
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_output_embeds_base_model(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
[
[
[0.19261885, -0.13732955, 0.4119799],
[0.22150156, -0.07422661, 0.39037204],
[0.22756018, -0.0896414, 0.3701467],
]
] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 203 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = """<s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], """<unk>""")
        self.assertEqual(vocab_keys[1], """<s>""")
        self.assertEqual(vocab_keys[-1], """<eod>""")
        self.assertEqual(len(vocab_keys), 1006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("""xlnet-base-cased""")
        text = tokenizer.encode("""sequence builders""", add_special_tokens=False)
        text_2 = tokenizer.encode("""multi-sequence build""", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
@slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCamelCase = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # 
        # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCamelCase, model_name="""xlnet-base-cased""", revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""")
| 29 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)] )
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]])
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("""gpt2""")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            """max_new_tokens""": 1024,
            """foo""": """bar""",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"""foo""": """bar"""})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = """bar"""
        with tempfile.TemporaryDirectory("""test-generation-config""") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, """bar""")
        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, """foo""")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]])
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
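# A minimal round-trip sketch of the API exercised above (illustrative only;
# "my-generation-config" is a hypothetical local directory):
#     config = GenerationConfig(do_sample=True, temperature=0.7)
#     unused = config.update(max_new_tokens=256, foo="bar")  # unused == {"foo": "bar"}
#     config.save_pretrained("my-generation-config")
#     config = GenerationConfig.from_pretrained("my-generation-config")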
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0)
        config.push_to_hub("""test-generation-config""", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(F'{USER}/test-generation-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="""test-generation-config""")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="""test-generation-config""", push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(F'{USER}/test-generation-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0)
        config.push_to_hub("""valid_org/test-generation-config-org""", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="""valid_org/test-generation-config-org""")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="""valid_org/test-generation-config-org""", push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 29 | 1 |
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Function to reshape a row Numpy array into a column Numpy array"""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Function to compute the covariance matrix inside each class"""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Function to compute the covariance matrix between multiple classes"""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int) -> np.ndarray:
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(
            features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes")
        assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError
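# A minimal end-to-end sketch (illustrative only; the tiny dataset below is an
# assumption, not part of the original test-suite):
def _demo_principal_component_analysis() -> None:
    # 3 features x 5 samples; samples are stored column-wise in this module,
    # so the projection onto 2 components has shape (dimensions, n_samples).
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]], dtype=float)
    projected = principal_component_analysis(features, 2)
    assert projected.shape == (2, 5)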
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False
    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file
    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)
    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")
    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True)
        return os.path.join(local_path, self.dummy_file_name)
    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)
    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url
    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)
    def download(self, data_url, *args):
        return self.download_and_extract(data_url)
    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)
    def extract(self, path, *args, **kwargs):
        return path
    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self):
        pass
    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)
        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")
    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
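# Illustrative usage (a minimal sketch; "my_dataset" and the URL are made up):
#     dl_manager = MockDownloadManager(dataset_name="my_dataset", config=None,
#                                      version="1.0.0", use_local_dummy_data=True)
#     paths = dl_manager.download_and_extract({"train": "https://host/train.csv"})
#     # -> {"train": "<path to dummy_data>/train.csv"} resolved against the
#     #    extracted dummy_data.zip instead of the real remote file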
| 15 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError('''Iterations must be at least 1''')
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('''Coefficient matrix is not strictly diagonally dominant''')
    return is_diagonally_dominant
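# A worked usage sketch (illustrative values, not part of the original module):
# solve the strictly diagonally dominant system
#     4x +  y +  z =  2
#      x + 5y + 2z = -6
#      x + 2y + 4z = -4
#
#     coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
#     constant = np.array([[2.0], [-6.0], [-4.0]])
#     jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=3)
# Each extra iteration moves the returned approximation closer to the exact
# solution, since strict diagonal dominance guarantees convergence.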
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_compatible(self):
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_not_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_transformer_model_is_compatible(self):
        filenames = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_transformer_model_is_not_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_all_is_compatible_variant(self):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_compatible_variant(self):
        filenames = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
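# Illustrative usage outside the test-suite (a minimal sketch; the file list is
# a made-up example): `is_safetensors_compatible` answers whether every PyTorch
# `.bin` weight in a pipeline repo has a `.safetensors` counterpart.
#
#     is_safetensors_compatible(
#         ["unet/diffusion_pytorch_model.bin",
#          "unet/diffusion_pytorch_model.safetensors"])  # -> True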
| 72 | 0 |
'''simple docstring'''
def ugly_numbers(n: int) -> int:
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
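# A quick sanity check (a minimal sketch, not part of the original module):
# the n-th "ugly number" has no prime factors other than 2, 3 and 5, so the
# sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...
#   ugly_numbers(1)  -> 1
#   ugly_numbers(10) -> 12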
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"{ugly_numbers(2_00) = }") | 112 | '''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowercase : Optional[int] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowercase : List[Any] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowercase : List[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
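if __name__ == "__main__":
    # Hedged usage sketch, mirroring the doctest in _KWARGS_DESCRIPTION above.
    # It assumes the optional `mauve-text`, `torch`, and `faiss` dependencies
    # are installed; featurization downloads a GPT-2 model on first use.
    mauve_metric = datasets.load_metric("mauve")
    result = mauve_metric.compute(
        predictions=["hello there", "general kenobi"],
        references=["hello there", "general kenobi"],
    )
    print(result.mauve)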
| 239 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
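if __name__ == "__main__":
    # Hedged, standalone illustration of the laziness used above (independent
    # of transformers): a PEP 562 module-level __getattr__ defers importing a
    # heavy submodule until its attribute is first accessed. All names in this
    # demo are made up.
    import sys
    import types
    demo = types.ModuleType("demo_pkg")
    def _lazy_getattr(name):
        if name == "json_tools":
            import json  # stands in for a heavy dependency such as torch
            return json
        raise AttributeError(name)
    demo.__getattr__ = _lazy_getattr
    sys.modules["demo_pkg"] = demo
    import demo_pkg
    print(demo_pkg.json_tools.dumps({"lazy": True}))  # import happens here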
| 357 |
"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    """Return the NAND of the two inputs: 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)
def test_nand_gate() -> None:
    """Exhaustively check the NAND truth table."""
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
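# Hedged extra illustration: NAND is functionally complete, so the other basic
# gates can be derived from `nand_gate` alone. The helper names below are ad
# hoc additions, not part of the original module.
def not_gate_via_nand(input_1: int) -> int:
    # NOT(a) == NAND(a, a)
    return nand_gate(input_1, input_1)
def and_gate_via_nand(input_1: int, input_2: int) -> int:
    # AND(a, b) == NOT(NAND(a, b))
    return nand_gate(nand_gate(input_1, input_2), nand_gate(input_1, input_2))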
| 272 | 0 |
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Numerical Methods for Diffusion Models (IPNDM)."""
    order = 1
    @register_to_config
    def __init__(self, num_train_timesteps: int = 1_000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        # Adams-Bashforth-style multistep combination of the stored history
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample
    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
    def __len__(self):
        return self.config.num_train_timesteps
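if __name__ == "__main__":
    # Hedged smoke test: drive the scheduler with a dummy "model" that always
    # predicts zeros. This exercises only the bookkeeping (timesteps and the
    # `ets` history), not a real diffusion model.
    scheduler = IPNDMScheduler(num_train_timesteps=50)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # a real pipeline would call a UNet here
        sample = scheduler.step(model_output, t, sample).prev_sample
    print(sample.shape)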
| 113 |
"""simple docstring"""
TEXT_TO_IMAGE_PARAMS = frozenset(
[
'''prompt''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(['''prompt''', '''negative_prompt'''])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(['''image'''])
IMAGE_VARIATION_PARAMS = frozenset(
[
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(['''image'''])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
[
'''prompt''',
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(['''prompt''', '''image''', '''negative_prompt'''])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
# Text guided image variation with an image mask
'''prompt''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt'''])
IMAGE_INPAINTING_PARAMS = frozenset(
[
# image variation with an image mask
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['''image''', '''mask_image'''])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
'''example_image''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['''example_image''', '''image''', '''mask_image'''])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(['''class_labels'''])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(['''class_labels'''])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(['''batch_size'''])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(['''batch_size'''])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
[
'''prompt''',
'''audio_length_in_s''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(['''prompt''', '''negative_prompt'''])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(['''input_tokens'''])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(['''input_tokens'''])
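if __name__ == "__main__":
    # Hedged usage sketch: in the diffusers test suite these frozensets document
    # the expected call signature of each pipeline family, so a test can diff a
    # pipeline's accepted kwargs against them. `candidate_kwargs` is made up.
    candidate_kwargs = {"prompt": "a cat", "strength": 0.8, "guidance_scale": 7.5}
    unexpected = set(candidate_kwargs) - TEXT_TO_IMAGE_PARAMS
    print(f"kwargs outside TEXT_TO_IMAGE_PARAMS: {sorted(unexpected)}")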
| 113 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
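if __name__ == "__main__":
    # Hedged sanity check: with the default conv strides (5, 2, 2, 2, 2, 2, 2)
    # the feature encoder downsamples raw audio by 5 * 2**6 = 320 samples per
    # output frame, which is what `inputs_to_logits_ratio` computes.
    config = UniSpeechSatConfig()
    print(config.inputs_to_logits_ratio)  # expected: 320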
| 357 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"],
    )
    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
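# Hedged standalone illustration of the fused-QKV split performed above: a
# single (3*dim, dim) projection matrix is carved into equal query/key/value
# blocks. The shapes are arbitrary; only the slicing pattern matters, and the
# helper name is ad hoc.
def _demo_qkv_split(dim: int = 4) -> None:
    qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)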
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes (destination key patterns follow the upstream DETA
    # conversion script; the originals were lost in this copy)
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format="coco_detection")
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))
# verify logits
print("Logits:" , outputs.logits[0, :3, :3] )
print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
SCREAMING_SNAKE_CASE = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
SCREAMING_SNAKE_CASE = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
SCREAMING_SNAKE_CASE = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
SCREAMING_SNAKE_CASE = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(UpperCAmelCase__ ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(UpperCAmelCase__ ) , atol=1e-4 )
print("Everything ok!" )
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 206 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)
    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str]] = None,
        revision: Optional[str] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs,
        )
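# Hedged usage sketch for the wrapper above. "model.onnx" and the input name
# are placeholders for an actual exported graph, so this block is kept as a
# commented illustration rather than executable code.
# if __name__ == "__main__":
#     session = OnnxRuntimeModel.load_model("model.onnx")  # hypothetical local file
#     wrapper = OnnxRuntimeModel(model=session, model_save_dir=Path("."))
#     outputs = wrapper(input_ids=np.ones((1, 8), dtype=np.int64))  # input names depend on the graph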
| 29 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")
    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata
    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()
    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)
    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content
    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)
    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
    ap.add_argument('readme_filepath')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 354 |
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''')
    # handling of values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''')
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
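    # Hedged worked example: cos(60 degrees) ** 2 = 0.25, so an initial
    # intensity of 100.0 is attenuated to 25.0 by the analyser.
    print(malus_law(100.0, 60.0))  # ~25.0 up to floating point error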
| 18 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])
        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6],
        )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
@slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase : Optional[int] = {"input_ids": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase, model_name="facebook/m2m100_418M", revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
    tgt_text = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 12_8006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 12_8022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 12_8076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 12_8063)
    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)
    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )
        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k, v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[12_8022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 12_8006,
            },
        )
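if __name__ == "__main__":
    # Hedged end-to-end sketch, not part of the test suite: pair the tokenizer
    # with the matching seq2seq model for an en->fr translation. This downloads
    # the ~2GB facebook/m2m100_418M checkpoint on first run.
    from transformers import M2M100ForConditionalGeneration
    tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
    model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    encoded = tok("Hello, how are you?", return_tensors="pt")
    generated = model.generate(**encoded, forced_bos_token_id=tok.get_lang_id("fr"))
    print(tok.batch_decode(generated, skip_special_tokens=True))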
| 48 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class __snake_case ( _lowercase):
snake_case__ : Any = VOCAB_FILES_NAMES
snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Optional[int] = ["input_ids", "attention_mask"]
snake_case__ : Any = BartTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''] )
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''] )
            changes_to_apply = False
            if state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''' , trim_offsets ) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('''type''' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ):
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        """simple docstring"""
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        """simple docstring"""
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                '''to use it with pretokenized inputs.''' )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        """simple docstring"""
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                '''to use it with pretokenized inputs.''' )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
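# --- Editor's note (added): a quick illustration of the special-token layout the two
# methods above produce for BART -- single sequences become `<s> A </s>` and pairs
# become `<s> A </s></s> B </s>`, with all-zero token type ids. A hedged sketch using
# only the public tokenizer API:
def show_bart_special_tokens_sketch():
    from transformers import BartTokenizerFast

    tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
    # e.g. [0, 100, 101, 2, 2, 200, 2]  ->  <s> A </s></s> B </s>
    print(tok.build_inputs_with_special_tokens([100, 101], [200]))
    print(tok.create_token_type_ids_from_sequences([100, 101], [200]))  # all zeros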
| 72 | 0 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest( TestCase ):
    def test_no_type( self ):
        arr = pa.array(TypedSequence([1, 2, 3] ) )
        self.assertEqual(arr.type , pa.int64() )
    def test_array_type_forbidden( self ):
        with self.assertRaises(ValueError ):
            arr = pa.array(TypedSequence([1, 2, 3] ) , type=pa.int64() )
    def test_try_type_and_type_forbidden( self ):
        with self.assertRaises(ValueError ):
            arr = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''bool''' ) , type=Value('''int64''' ) ) )
    def test_compatible_type( self ):
        arr = pa.array(TypedSequence([1, 2, 3] , type=Value('''int32''' ) ) )
        self.assertEqual(arr.type , pa.int32() )
    def test_incompatible_type( self ):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
            arr = pa.array(TypedSequence(['''foo''', '''bar'''] , type=Value('''int64''' ) ) )
    def test_try_compatible_type( self ):
        arr = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''int32''' ) ) )
        self.assertEqual(arr.type , pa.int32() )
    def test_try_incompatible_type( self ):
        arr = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=Value('''int64''' ) ) )
        self.assertEqual(arr.type , pa.string() )
    def test_compatible_extension_type( self ):
        arr = pa.array(TypedSequence([[[1, 2, 3]]] , type=Array2D((1, 3) , '''int64''' ) ) )
        self.assertEqual(arr.type , Array2DExtensionType((1, 3) , '''int64''' ) )
    def test_incompatible_extension_type( self ):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
            arr = pa.array(TypedSequence(['''foo''', '''bar'''] , type=Array2D((1, 3) , '''int64''' ) ) )
    def test_try_compatible_extension_type( self ):
        arr = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=Array2D((1, 3) , '''int64''' ) ) )
        self.assertEqual(arr.type , Array2DExtensionType((1, 3) , '''int64''' ) )
    def test_try_incompatible_extension_type( self ):
        arr = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=Array2D((1, 3) , '''int64''' ) ) )
        self.assertEqual(arr.type , pa.string() )
    @require_pil
    def test_exhaustive_cast( self ):
        import PIL.Image
        pil_image = PIL.Image.fromarray(np.arange(10 , dtype=np.uint8 ).reshape(2 , 5 ) )
        with patch(
            '''datasets.arrow_writer.cast_to_python_objects''' , side_effect=cast_to_python_objects ) as mock_cast_to_python_objects:
            arr = pa.array(TypedSequence([{'''path''': None, '''bytes''': b'''image_bytes'''}, pil_image] , type=Image() ) )
            args , kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn('''optimize_list_casting''' , kwargs )
            self.assertFalse(kwargs['''optimize_list_casting'''] )
def _check_output( stream , expected_num_chunks ):
    """simple docstring"""
    reader = pa.BufferReader(stream ) if isinstance(stream , pa.Buffer ) else pa.memory_map(stream )
    f = pa.ipc.open_stream(reader )
    pa_table : pa.Table = f.read_all()
    assert len(pa_table.to_batches() ) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
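# --- Editor's note (added): the helper above accepts either an in-memory `pa.Buffer`
# or a file path (opened via `pa.memory_map`). A standalone sketch of the same read
# path, using only pyarrow:
def read_arrow_stream_sketch(source):
    import pyarrow as pa

    reader = pa.BufferReader(source) if isinstance(source, pa.Buffer) else pa.memory_map(source)
    f = pa.ipc.open_stream(reader)
    return f.read_all()  # one pa.Table, possibly assembled from several record batches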
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
    '''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.int64()}, {'''col_1''': pa.string(), '''col_2''': pa.int32()}] )
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ : str = pa.BufferOutputStream()
lowerCamelCase__ : List[str] = pa.schema(UpperCAmelCase ) if fields else None
with ArrowWriter(stream=UpperCAmelCase , schema=UpperCAmelCase , writer_batch_size=UpperCAmelCase ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
        fields = {'''col_1''': pa.string(), '''col_2''': pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _a ( ) -> int:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = pa.BufferOutputStream()
lowerCamelCase__ : Optional[int] = Features({'''labels''': ClassLabel(names=['''neg''', '''pos'''] )} )
with ArrowWriter(stream=UpperCAmelCase , features=UpperCAmelCase ) as writer:
writer.write({'''labels''': 0} )
writer.write({'''labels''': 1} )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
lowerCamelCase__ : Tuple = pa.BufferReader(output.getvalue() )
lowerCamelCase__ : Union[str, Any] = pa.ipc.open_stream(UpperCAmelCase )
lowerCamelCase__ : pa.Table = f.read_all()
lowerCamelCase__ : Union[str, Any] = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCAmelCase )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
def _a ( UpperCAmelCase ) -> str:
"""simple docstring"""
lowerCamelCase__ : List[str] = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCAmelCase , writer_batch_size=UpperCAmelCase , hash_salt='''split_name''' , check_duplicates=UpperCAmelCase , ) as writer:
with pytest.raises(UpperCAmelCase ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=[1, 2] )
lowerCamelCase__ , lowerCamelCase__ : int = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10] )
def _a ( UpperCAmelCase ) -> int:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCAmelCase , writer_batch_size=UpperCAmelCase , hash_salt='''split_name''' , check_duplicates=UpperCAmelCase , ) as writer:
with pytest.raises(UpperCAmelCase ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=10 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=10 )
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10] )
def _a ( UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCAmelCase , writer_batch_size=UpperCAmelCase , hash_salt='''split_name''' , check_duplicates=UpperCAmelCase , ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=1 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=2 )
lowerCamelCase__ , lowerCamelCase__ : List[str] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
    '''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.int64()}, {'''col_1''': pa.string(), '''col_2''': pa.int32()}] )
def _a ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__ : List[Any] = pa.BufferOutputStream()
lowerCamelCase__ : List[str] = pa.schema(UpperCAmelCase ) if fields else None
with ArrowWriter(stream=UpperCAmelCase , schema=UpperCAmelCase , writer_batch_size=UpperCAmelCase ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
writer.write_batch({'''col_1''': [], '''col_2''': []} )
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
        fields = {'''col_1''': pa.string(), '''col_2''': pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
    '''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.int64()}, {'''col_1''': pa.string(), '''col_2''': pa.int32()}] )
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__ : List[Any] = pa.BufferOutputStream()
lowerCamelCase__ : Any = pa.schema(UpperCAmelCase ) if fields else None
with ArrowWriter(stream=UpperCAmelCase , schema=UpperCAmelCase , writer_batch_size=UpperCAmelCase ) as writer:
writer.write_table(pa.Table.from_pydict({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} ) )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
        fields = {'''col_1''': pa.string(), '''col_2''': pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
    '''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.int64()}, {'''col_1''': pa.string(), '''col_2''': pa.int32()}] )
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Dict:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = pa.BufferOutputStream()
lowerCamelCase__ : List[str] = pa.schema(UpperCAmelCase ) if fields else None
with ArrowWriter(stream=UpperCAmelCase , schema=UpperCAmelCase , writer_batch_size=UpperCAmelCase ) as writer:
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''foo'''], '''col_2''': [1]} ) )
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''bar'''], '''col_2''': [2]} ) )
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
        fields = {'''col_1''': pa.string(), '''col_2''': pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {'''col_1''': pa.string(), '''col_2''': pa.int64()}
        output = os.path.join(tmp_dir , '''test.arrow''' )
        with ArrowWriter(path=output , schema=pa.schema(fields ) ) as writer:
            writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
            num_examples , num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
        _check_output(output , 1 )
def get_base_dtype( arr_type ):
    """simple docstring"""
    if pa.types.is_list(arr_type ):
        return get_base_dtype(arr_type.value_type )
    else:
        return arr_type
def change_first_primitive_element_in_list( lst , value ):
    """simple docstring"""
    if isinstance(lst[0] , list ):
        change_first_primitive_element_in_list(lst[0] , value )
    else:
        lst[0] = value
@pytest.mark.parametrize('''optimized_int_type, expected_dtype''' , [(None, pa.int64()), (Value('''int32''' ), pa.int32())] )
@pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def test_optimized_int_type_for_typed_sequence( sequence , optimized_int_type , expected_dtype ):
    """simple docstring"""
    arr = pa.array(TypedSequence(sequence , optimized_int_type=optimized_int_type ) )
    assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
    '''col, expected_dtype''' , [
        ('''attention_mask''', pa.int8()),
        ('''special_tokens_mask''', pa.int8()),
        ('''token_type_ids''', pa.int8()),
        ('''input_ids''', pa.int32()),
        ('''other''', pa.int64()),
    ] , )
@pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def test_optimized_typed_sequence( sequence , col , expected_dtype ):
    """simple docstring"""
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
    assert get_base_dtype(arr.type ) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence )
        value = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
        change_first_primitive_element_in_list(sequence , value )
        arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
        assert get_base_dtype(arr.type ) == pa.int64()
@pytest.mark.parametrize('''raise_exception''' , [False, True] )
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__ : Dict = str(tmp_path / '''dataset-train.arrow''' )
try:
with ArrowWriter(path=UpperCAmelCase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _a ( UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = '''mock://dataset-train.arrow'''
with ArrowWriter(path=UpperCAmelCase , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCAmelCase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCAmelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCAmelCase ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
lowerCamelCase__ , lowerCamelCase__ : List[str] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
lowerCamelCase__ : Dict = pa.BufferReader(output.getvalue() )
lowerCamelCase__ : pa.Table = pq.read_table(UpperCAmelCase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('''embed_local_files''' , [False, True] )
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
import PIL.Image
lowerCamelCase__ : Union[str, Any] = str(tmp_path / '''test_image_rgb.jpg''' )
    PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uint8 ) ).save(UpperCAmelCase , format='''png''' )
lowerCamelCase__ : Optional[Any] = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCAmelCase , features=Features({'''image''': Image()} ) , embed_local_files=UpperCAmelCase ) as writer:
writer.write({'''image''': image_path} )
writer.finalize()
lowerCamelCase__ : str = pa.BufferReader(output.getvalue() )
lowerCamelCase__ : pa.Table = pq.read_table(UpperCAmelCase )
lowerCamelCase__ : Dict = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['''image'''][0]['''path'''] , UpperCAmelCase )
with open(UpperCAmelCase , '''rb''' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _a ( ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = pa.schema([pa.field('''col_1''' , pa.string() , nullable=UpperCAmelCase )] )
lowerCamelCase__ : int = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCAmelCase ) as writer:
writer._build_writer(inferred_schema=UpperCAmelCase )
assert writer._schema == pa.schema([pa.field('''col_1''' , pa.string() )] )
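# --- Editor's note (added): a condensed usage sketch tying the tests above together --
# write two examples with `datasets.arrow_writer.ArrowWriter` into an in-memory stream
# and read them back. Hedged: only APIs already exercised by the tests are used.
def arrow_writer_roundtrip_sketch():
    import pyarrow as pa
    from datasets.arrow_writer import ArrowWriter

    stream = pa.BufferOutputStream()
    with ArrowWriter(stream=stream) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()
    assert num_examples == 2 and table.num_rows == 2
    return table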
| 265 |
import pprint
import requests
API_ENDPOINT_URL = 'https://zenquotes.io/api'
def quote_of_the_day() -> list:
    """simple docstring"""
    return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def random_quotes() -> list:
    """simple docstring"""
    return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
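# --- Editor's note (added): the helpers above issue bare GET requests. A slightly
# hardened variant of the same call (explicit timeout plus HTTP status check); the
# endpoint constant is the one defined above:
def get_random_quote_safe() -> list:
    import requests

    response = requests.get(API_ENDPOINT_URL + "/random", timeout=10)
    response.raise_for_status()  # surface HTTP errors instead of decoding bad JSON
    return response.json()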
| 265 | 1 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card( model_card_dir : str , src_lang : str , tgt_lang : str ) -> str:
    '''simple docstring'''
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f'{src_lang}-{tgt_lang}'
_UpperCAmelCase = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , "README.md" )
    print(f'Generating {path}' )
    with open(path , "w" , encoding="utf-8" ) as f:
        f.write(_UpperCAmelCase )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base , src_lang , tgt_lang = model_name.split('''-''')
    model_card_dir = model_cards_dir / '''facebook''' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
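# --- Editor's note (added): the script above renders one large f-string per language
# pair. A minimal sketch of the same templating pattern with a tiny template, so the
# mechanics are visible in isolation (the template fields here are illustrative):
def write_card_sketch(card_dir, src_lang: str, tgt_lang: str) -> None:
    from pathlib import Path

    pair = f"{src_lang}-{tgt_lang}"
    readme = f"---\nlanguage:\n- {src_lang}\n- {tgt_lang}\n---\n# FSMT wmt19 {pair}\n"
    target = Path(card_dir) / "README.md"
    target.parent.mkdir(parents=True, exist_ok=True)
    target.write_text(readme, encoding="utf-8")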
| 22 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class WhisperFeatureExtractor( SequenceFeatureExtractor ):
    '''simple docstring'''
    model_input_names = ['''input_features''']
    def __init__( self , feature_size=80 , sampling_rate=16000 , hop_length=160 , chunk_length=30 , n_fft=400 , padding_value=0.0 , return_attention_mask=False , **kwargs , ):
        """simple docstring"""
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , )
    def _np_extract_fbank_features( self , waveform ):
        """simple docstring"""
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , """hann""") , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values , attention_mask , padding_value = 0.0 ):
        """simple docstring"""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32)
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
        return normed_input_values
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "max_length" , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""")
lowerCAmelCase = isinstance(__lowerCAmelCase , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
lowerCAmelCase = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
            lowerCAmelCase = [np.asarray([speech] , dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray):
            lowerCAmelCase = np.asarray(__lowerCAmelCase , dtype=np.float32)
        elif isinstance(__lowerCAmelCase , np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            lowerCAmelCase = raw_speech.astype(np.float32)
# always return batch
if not is_batched:
lowerCAmelCase = [np.asarray([raw_speech]).T]
lowerCAmelCase = BatchFeature({"""input_features""": raw_speech})
# convert into correct format for padding
lowerCAmelCase = self.pad(
__lowerCAmelCase , padding=__lowerCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowerCAmelCase = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
lowerCAmelCase = np.stack(padded_inputs["""input_features"""] , axis=0)
# make sure list is in array format
lowerCAmelCase = padded_inputs.get("""input_features""").transpose(2 , 0 , 1)
lowerCAmelCase = [self._np_extract_fbank_features(__lowerCAmelCase) for waveform in input_features[0]]
if isinstance(input_features[0] , __lowerCAmelCase):
lowerCAmelCase = [np.asarray(__lowerCAmelCase , dtype=np.floataa) for feature in input_features]
else:
lowerCAmelCase = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowerCAmelCase = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
lowerCAmelCase = padded_inputs.convert_to_tensors(__lowerCAmelCase)
return padded_inputs
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output['''feature_extractor_type'''] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
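# --- Editor's note (added): the extractor above applies Whisper's log-mel dynamic
# range trick -- clamp each log10 spectrogram to within 8 decades of its max, then
# shift/scale into roughly [-1, 1]. A numpy-only sketch of just that normalization:
import numpy as np

def whisper_log_mel_normalize_sketch(mel_power_spec: np.ndarray) -> np.ndarray:
    log_spec = np.log10(np.clip(mel_power_spec, a_min=1e-10, a_max=None))
    log_spec = np.maximum(log_spec, log_spec.max() - 8.0)  # keep the top 8 decades
    return (log_spec + 4.0) / 4.0  # map into an approximately unit range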
| 272 | 0 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
class ByT5Tokenizer( PreTrainedTokenizer ):
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=125 , additional_special_tokens=None , **kwargs , ):
        '''simple docstring'''
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'''<extra_id_{i}>''' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x : bool('extra_id' in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    ' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
                    ' extra_ids tokens' )
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder )
        n = len(additional_special_tokens )
        for i, token in enumerate(additional_special_tokens ):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        # normal case: some special tokens
        if token_ids_b is None:
            return ([0] * len(token_ids_a )) + [1]
        return ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
    def _add_eos_if_not_present( self , token_ids : List[int] ):
        '''simple docstring'''
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
                ''' eos tokens being added.''' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        token_ids_a = self._add_eos_if_not_present(token_ids_a )
        if token_ids_b is None:
            return token_ids_a
        else:
            token_ids_b = self._add_eos_if_not_present(token_ids_b )
            return token_ids_a + token_ids_b
    def _tokenize( self , text : str ):
        '''simple docstring'''
        tokens = [chr(i ) for i in text.encode('''utf-8''' )]
        return tokens
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token ) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token ) + self._num_special_tokens
        return token_id
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens )
        return token
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        bstring = b''
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode('''utf-8''' )
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode('''utf-8''' )
            elif token in self.special_tokens_encoder:
                tok_string = token.encode('''utf-8''' )
            elif token in self.added_tokens_encoder:
                tok_string = token.encode('''utf-8''' )
            else:
                tok_string = bytes([ord(token )] )
            bstring += tok_string
        string = bstring.decode('''utf-8''' , errors='''ignore''' )
        return string
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        return ()
| 363 |
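# --- Editor's note (added): a self-contained sketch of the ByT5-style byte-level
# scheme implemented above -- tokens are UTF-8 bytes, and ids are offset by the
# number of special tokens (pad=0, eos=1, unk=2). The offset below is an assumption
# matching that layout:
NUM_SPECIAL_TOKENS = 3

def byte_encode_sketch(text: str) -> list:
    return [b + NUM_SPECIAL_TOKENS for b in text.encode("utf-8")]

def byte_decode_sketch(ids: list) -> str:
    # Drop special-token ids, shift back to raw bytes, and decode.
    return bytes(i - NUM_SPECIAL_TOKENS for i in ids if i >= NUM_SPECIAL_TOKENS).decode("utf-8", errors="ignore")

assert byte_decode_sketch(byte_encode_sketch("héllo")) == "héllo"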
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        'generator',
        'return_dict',
        'decoder_num_inference_steps',
        'super_res_num_inference_steps',
    ]
    test_xformers_attention = False
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(a )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
lowerCAmelCase__ : Optional[Any] = UnCLIPTextProjModel(**a )
return model
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
        model = UNet2DConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
        model = UNet2DModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
torch.manual_seed(1 )
        model = UNet2DModel(**self.dummy_super_res_kwargs )
return model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.dummy_decoder
lowerCAmelCase__ : Optional[int] = self.dummy_text_proj
lowerCAmelCase__ : Any = self.dummy_text_encoder
lowerCAmelCase__ : Any = self.dummy_tokenizer
lowerCAmelCase__ : Any = self.dummy_super_res_first
lowerCAmelCase__ : Optional[int] = self.dummy_super_res_last
lowerCAmelCase__ : Dict = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCAmelCase__ : Optional[int] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _lowerCamelCase ( self : Any , a : Dict , a : List[str]=0 , a : List[str]=True ):
'''simple docstring'''
lowerCAmelCase__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Optional[int] = torch.manual_seed(a )
else:
lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a )
if pil_image:
lowerCAmelCase__ : Optional[int] = input_image * 0.5 + 0.5
lowerCAmelCase__ : Dict = input_image.clamp(0 , 1 )
lowerCAmelCase__ : List[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase__ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(a )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : str = pipe(**a )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a )
lowerCAmelCase__ : int = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = pipe(**a )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : int = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCAmelCase__ : Optional[int] = pipe(**a )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.device('cpu' )
        class DummyScheduler:
            init_noise_sigma = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , decoder_latents=a , super_res_latents=a ).images
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' )
lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds
lowerCAmelCase__ : List[Any] = pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowerCAmelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
lowerCAmelCase__ : Tuple = UnCLIPImageVariationPipeline.from_pretrained(
            'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.float16 )
lowerCAmelCase__ : Union[str, Any] = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : List[str] = pipeline(
a , generator=a , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(a , a , 15 )
| 307 | 0 |
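# --- Editor's note (added): `assert_mean_pixel_difference` used above compares two
# uint8 images by mean absolute pixel error. A hedged numpy restatement of that kind
# of check (the threshold semantics are assumed, not copied from diffusers):
import numpy as np

def mean_pixel_difference_sketch(image_a: np.ndarray, image_b: np.ndarray, expected_max: float = 10.0) -> None:
    diff = np.abs(image_a.astype(np.float32) - image_b.astype(np.float32)).mean()
    assert diff < expected_max, f"mean pixel difference {diff} exceeds {expected_max}"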
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : bool ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = Path('data_bin' )
__lowerCamelCase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(UpperCamelCase__ ).parent ) , checkpoint_file=Path(UpperCamelCase__ ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(UpperCamelCase__ ) , bpe='sentencepiece' , sentencepiece_model=str(Path(UpperCamelCase__ ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(UpperCamelCase__ )
__lowerCamelCase = xmod.model.encoder.sentence_encoder
__lowerCamelCase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__lowerCamelCase = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , UpperCamelCase__ )
__lowerCamelCase = XmodForSequenceClassification(UpperCamelCase__ ) if classification_head else XmodForMaskedLM(UpperCamelCase__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowerCamelCase = xmod_sent_encoder.embed_tokens.weight
__lowerCamelCase = xmod_sent_encoder.embed_positions.weight
__lowerCamelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowerCamelCase = xmod_sent_encoder.layernorm_embedding.weight
__lowerCamelCase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowerCamelCase = model.roberta.encoder.layer[i]
__lowerCamelCase = xmod_sent_encoder.layers[i]
# self attention
__lowerCamelCase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
__lowerCamelCase = xmod_layer.self_attn.q_proj.weight
__lowerCamelCase = xmod_layer.self_attn.q_proj.bias
__lowerCamelCase = xmod_layer.self_attn.k_proj.weight
__lowerCamelCase = xmod_layer.self_attn.k_proj.bias
__lowerCamelCase = xmod_layer.self_attn.v_proj.weight
__lowerCamelCase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowerCamelCase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
# intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError('Dimensions of intermediate weights do not match.' )
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
# output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError('Dimensions of feed-forward weights do not match.' )
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads['mnli'].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads['mnli'].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads['mnli'].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads['mnli'].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 ) # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads['mnli'](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1E-3 )
    print('Do both models output the same tensors?' , '🔥' if success else '💩' )
    if not success:
        raise Exception('Something went wRoNg' )
    Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
__A = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
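As a standalone illustration of the parity check the conversion script performs, the following minimal sketch compares two model outputs by maximum absolute difference and torch.allclose. The tensors and shapes here are synthetic stand-ins, not real model outputs:

import torch

def outputs_match(our_output: torch.Tensor, their_output: torch.Tensor, atol: float = 1e-3) -> bool:
    # the script above prints this value and expects roughly 1e-7 for a faithful conversion
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")
    return torch.allclose(our_output, their_output, atol=atol)

a = torch.randn(1, 11, 768)
b = a + 1e-6 * torch.randn_like(a)
assert outputs_match(a, b)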
| 90 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotAudioClassificationPipeline( Pipeline ):
    def __init__(self , **kwargs ):
        super().__init__(**kwargs )
        if self.framework != "pt":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
        # No specific FOR_XXX available yet
    def __call__(self , audios , **kwargs ):
        return super().__call__(audios , **kwargs )
    def _sanitize_parameters(self , **kwargs ):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["""candidate_labels"""] = kwargs["""candidate_labels"""]
        if "hypothesis_template" in kwargs:
            preprocess_params["""hypothesis_template"""] = kwargs["""hypothesis_template"""]
        return preprocess_params, {}, {}
    def preprocess(self , audio , candidate_labels=None , hypothesis_template="This is a sound of {}." ):
        if isinstance(audio , str ):
            if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio , """rb""" ) as f:
                    audio = f.read()
        if isinstance(audio , bytes ):
            audio = ffmpeg_read(audio , self.feature_extractor.sampling_rate )
        if not isinstance(audio , np.ndarray ):
            raise ValueError("""We expect a numpy ndarray as input""" )
        if len(audio.shape ) != 1:
            raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
        inputs = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
        inputs["""candidate_labels"""] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["""text_inputs"""] = [text_inputs]
        return inputs
    def _forward(self , model_inputs ):
        candidate_labels = model_inputs.pop("""candidate_labels""" )
        text_inputs = model_inputs.pop("""text_inputs""" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            """candidate_labels""": candidate_labels,
            """logits""": outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess(self , model_outputs ):
        candidate_labels = model_outputs.pop("""candidate_labels""" )
        logits = model_outputs["""logits"""][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0 )
            scores = probs.tolist()
        else:
            raise ValueError("""`tf` framework not supported.""" )
        result = [
            {"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
 | 206 | 0 |
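A quick, self-contained illustration of the postprocessing step above (softmax over per-label logits, then sort by descending score); the label names and logit values here are made up:

import torch

labels = ["dog barking", "rain", "speech"]
logits = torch.tensor([2.0, 0.5, 1.0])  # one logit per candidate label
probs = logits.softmax(dim=0).tolist()
result = [
    {"score": score, "label": label}
    for score, label in sorted(zip(probs, labels), key=lambda pair: -pair[0])
]
print(result)  # highest-probability label first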
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
        os.makedirs(dpr_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        bart_tokenizer_path = os.path.join(self.tmpdirname , """bart_tokenizer""" )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_dpr_tokenizer( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
    def get_dpr_ctx_encoder_tokenizer( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
    def get_bart_tokenizer( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
    def tearDown( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
    def get_dummy_dataset( self ):
        '''simple docstring'''
        dataset = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever( self ):
        '''simple docstring'''
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever
    def get_dummy_custom_hf_index_retriever( self , from_disk: bool ):
        '''simple docstring'''
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""custom""" , )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname , """dataset""" )
            config.index_path = os.path.join(self.tmpdirname , """index.faiss""" )
            dataset.get_index("""embeddings""" ).save(os.path.join(self.tmpdirname , """index.faiss""" ) )
            dataset.drop_index("""embeddings""" )
            dataset.save_to_disk(os.path.join(self.tmpdirname , """dataset""" ) )
            del dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , dataset ) , )
        return retriever
    def get_dummy_legacy_index_retriever( self ):
        '''simple docstring'''
        dataset = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
        index_file_name = os.path.join(self.tmpdirname , """hf_bert_base.hnswSQ8_correct_phi_128.c_index""" )
dataset.save_faiss_index("""embeddings""" , index_file_name + """.index.dpr""" )
pickle.dump(dataset["""id"""] , open(index_file_name + """.index_meta.dpr""" , """wb""" ) )
        passages_file = os.path.join(self.tmpdirname , """psgs_w100.tsv.pkl""" )
        passages = {sample["""id"""]: [sample["""text"""], sample["""title"""]] for sample in dataset}
        pickle.dump(passages , open(passages_file , """wb""" ) )
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""legacy""" , index_path=self.tmpdirname , )
        retriever = RagRetriever(
            config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
        return retriever
    def test_canonical_hf_index_retriever_retrieve( self ):
        '''simple docstring'''
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
        self.assertEqual(len(doc_dicts[0]["""id"""] ) , n_docs )
        self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_canonical_hf_index_retriever_save_and_from_pretrained( self ):
        '''simple docstring'''
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname )
                retriever = RagRetriever.from_pretrained(tmp_dirname )
        self.assertIsInstance(retriever , RagRetriever )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever.retrieve(hidden_states , n_docs=1 )
        self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve( self ):
        '''simple docstring'''
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
        self.assertEqual(len(doc_dicts[0]["""id"""] ) , n_docs )
        self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained( self ):
        '''simple docstring'''
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
        self.assertIsInstance(retriever , RagRetriever )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever.retrieve(hidden_states , n_docs=1 )
        self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve_from_disk( self ):
        '''simple docstring'''
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
        self.assertEqual(len(doc_dicts[0]["""id"""] ) , n_docs )
        self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk( self ):
        '''simple docstring'''
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
        self.assertIsInstance(retriever , RagRetriever )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever.retrieve(hidden_states , n_docs=1 )
        self.assertTrue(out is not None )
    def test_legacy_index_retriever_retrieve( self ):
        '''simple docstring'''
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["""text""", """title"""] )
        self.assertEqual(len(doc_dicts[0]["""text"""] ) , n_docs )
        self.assertEqual(doc_dicts[0]["""text"""][0] , """bar""" ) # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["""text"""][0] , """foo""" ) # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_legacy_hf_index_retriever_save_and_from_pretrained( self ):
        '''simple docstring'''
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
        self.assertIsInstance(retriever , RagRetriever )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever.retrieve(hidden_states , n_docs=1 )
        self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call( self ):
        '''simple docstring'''
        import torch
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever(question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs )
        context_input_ids , context_attention_mask , retrieved_doc_embeds = (
            out["""context_input_ids"""],
            out["""context_attention_mask"""],
            out["""retrieved_doc_embeds"""],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(context_input_ids , list )
        self.assertIsInstance(context_attention_mask , list )
        self.assertIsInstance(retrieved_doc_embeds , np.ndarray )
        out = retriever(
            question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs , return_tensors="""pt""" , )
        context_input_ids , context_attention_mask , retrieved_doc_embeds , doc_ids = ( # noqa: F841
            out["""context_input_ids"""],
            out["""context_attention_mask"""],
            out["""retrieved_doc_embeds"""],
            out["""doc_ids"""],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(context_input_ids , torch.Tensor )
        self.assertIsInstance(context_attention_mask , torch.Tensor )
        self.assertIsInstance(retrieved_doc_embeds , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call( self ):
        '''simple docstring'''
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer )
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever(question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs )
        self.assertEqual(
            len(out ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) , True ) # check for doc token related keys in dictionary.
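The retrieval expectations above (doc 1 for the all-ones query, doc 0 for its negation) follow directly from FAISS inner-product search. A standalone sketch of that behavior, assuming the faiss-cpu package is installed, using the same +1s/2x dummy embeddings as the tests:

import numpy as np
import faiss

d = 8  # retrieval_vector_size in the tests above
doc_embeds = np.stack([np.ones(d, dtype=np.float32), 2 * np.ones(d, dtype=np.float32)])
index = faiss.IndexFlatIP(d)  # exact inner-product index (the "Flat" string factory)
index.add(doc_embeds)
queries = np.stack([np.ones(d, dtype=np.float32), -np.ones(d, dtype=np.float32)])
scores, ids = index.search(queries, 1)
print(ids.tolist())  # [[1], [0]]: doc 1 maximizes the positive query, doc 0 the negative one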
| 355 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        '''simple docstring'''
        return 32
    @property
    def time_input_dim( self ):
        '''simple docstring'''
        return 32
    @property
    def block_out_channels_0( self ):
        '''simple docstring'''
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        '''simple docstring'''
        return 100
@property
    def dummy_unet( self ):
'''simple docstring'''
torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNet2DConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        '''simple docstring'''
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="""epsilon""" , thresholding=False , )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((256, 256) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:16, :16] = 0
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image""": init_image,
            """mask_image""": mask,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 2,
            """guidance_scale""": 4.0,
            """output_type""": """np""",
        }
        return inputs
    def test_kandinsky_inpaint( self ):
        '''simple docstring'''
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(F'image.shape {image.shape}' )
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50_77_59_03, 0.49_52_71_95, 0.48_82_45_43, 0.50_19_22_37, 0.48_64_49_06, 0.49_37_38_14, 0.4_78_05_98, 0.47_23_48_27, 0.48_32_78_48] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self ):
        '''simple docstring'''
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        mask = np.ones((768, 768) , dtype=np.float32 )
        mask[:250, 250:-250] = 0
        prompt = """a hat"""
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        output = pipeline(
            image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
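The integration test above ends with assert_mean_pixel_difference, which compares generated and reference images by average absolute pixel error. A minimal sketch of that idea, with made-up arrays in place of real pipeline outputs:

import numpy as np

def mean_pixel_difference(image: np.ndarray, expected_image: np.ndarray) -> float:
    # average absolute per-pixel error between two images of identical shape
    return float(np.abs(image.astype(np.float32) - expected_image.astype(np.float32)).mean())

image = np.zeros((768, 768, 3), dtype=np.uint8)
expected = np.full((768, 768, 3), 2, dtype=np.uint8)
print(mean_pixel_difference(image, expected))  # 2.0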
| 125 | 0 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_tokenizers
class CodeGenTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"""add_prefix_space""": True}
    test_seq2seq = False
    def setUp( self ) -> Any:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ) -> Dict:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ) -> Union[str, Any]:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ) -> Optional[Any]:
        '''simple docstring'''
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer( self ) -> Any:
        '''simple docstring'''
        tokenizer = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text , add_prefix_space=True )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_rust_and_python_full_tokenizers( self ) -> Optional[int]:
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence , add_prefix_space=True )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        ids = tokenizer.encode(sequence , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_pretokenized_inputs( self , *args , **kwargs ) -> str:
        '''simple docstring'''
        pass
    def test_padding( self , max_length=15 ) -> str:
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = "This is a simple input"
                sa = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                pa = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , sa , max_length=max_length , padding='''max_length''' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , pa , max_length=max_length , padding='''max_length''' , )
    def test_padding_if_pad_token_set_slow( self ) -> List[Any]:
        '''simple docstring'''
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
        # Simple input
        s = "This is a simple input"
        sa = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        pa = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
        out_sa = tokenizer(sa , padding=True , truncate=True , return_tensors='''np''' )
        out_p = tokenizer(*p , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
        out_pa = tokenizer(pa , padding=True , truncate=True , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
    def test_add_bos_token_slow( self ) -> Optional[Any]:
        '''simple docstring'''
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=bos_token , add_bos_token=True )
        s = "This is a simple input"
        sa = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s )
        out_sa = tokenizer(sa )
        self.assertEqual(out_s.input_ids[0] , bos_token_id )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        decode_s = tokenizer.decode(out_s.input_ids )
        decode_sa = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , bos_token )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
    @slow
    def test_truncation( self ) -> Optional[int]:
        '''simple docstring'''
        tokenizer = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n result = a\nelse:\n result = b"
        input_ids = tokenizer.encode(text )
        truncate_before_pattern = ["^#", re.escape('''<|endoftext|>''' ), "^'''", "^\"\"\"", "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids , truncate_before_pattern=truncate_before_pattern )
        self.assertEqual(decoded_text , expected_truncated_text )
    def test_padding_different_model_input_name( self ) -> str:
        '''simple docstring'''
        pass
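As a simplified, standalone stand-in for the truncate_before_pattern decoding option exercised above (the real tokenizer method handles more cases), the sketch below cuts generated text at the earliest regex match; the sample completion string is made up:

import re

def truncate_before_pattern(text, patterns):
    # cut the text at the earliest match of any of the given regex patterns
    cut = len(text)
    for pattern in patterns:
        match = re.search(pattern, text, flags=re.MULTILINE)
        if match:
            cut = min(cut, match.start())
    return text[:cut]

completion = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n# trailing comment"
print(truncate_before_pattern(completion, [r"^#", r"\n\n\n"]))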
 | 348 |
from functools import lru_cache
@lru_cache
def factorial( num : int ) -> int:
    """simple docstring"""
    if num < 0:
        raise ValueError("Number should not be negative." )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
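A quick demonstration of the memoized factorial above; cache_info() is the hit/miss counter that functools.lru_cache attaches to the wrapped function:

print(factorial(5))  # 120
print(factorial(6))  # 720, reusing the cached factorial(5)
print(factorial.cache_info())  # CacheInfo(hits=..., misses=..., ...)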
| 18 | 0 |
from __future__ import annotations
def maximum_non_adjacent_sum( nums: list[int] ) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
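Two worked examples for the routine above, which picks non-adjacent elements maximizing the sum:

print(maximum_non_adjacent_sum([1, 2, 3, 1]))     # 4, from 1 + 3
print(maximum_non_adjacent_sum([2, 7, 9, 3, 1]))  # 12, from 2 + 9 + 1
print(maximum_non_adjacent_sum([]))               # 0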
| 371 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Union[str, Any] = logging.get_logger(__name__)
_snake_case : List[str] = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "encodec"
    def __init__( self , target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate=24000 , audio_channels=1 , normalize=False , chunk_length_s=None , overlap=None , hidden_size=128 , num_filters=32 , num_residual_layers=1 , upsampling_ratios=[8, 5, 4, 2] , norm_type="weight_norm" , kernel_size=7 , last_kernel_size=7 , residual_kernel_size=3 , dilation_growth_rate=2 , use_causal_conv=True , pad_mode="reflect" , compress=2 , num_lstm_layers=2 , trim_right_ratio=1.0 , codebook_size=1024 , codebook_dim=None , use_conv_shortcut=True , **kwargs , ) -> Any:
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                F'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' )
        super().__init__(**kwargs )
    @property
    def chunk_length( self ) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def chunk_stride( self ) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    @property
    def frame_rate( self ) -> int:
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
    @property
    def num_quantizers( self ) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
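With the defaults above (24 kHz audio, upsampling ratios 8*5*4*2 = 320 samples per frame, top bandwidth 24.0 kbps), the derived properties work out as follows; this sketch just instantiates the config and reads them:

config = EncodecConfig()
print(config.chunk_length)    # None: no chunking unless chunk_length_s is set
print(config.frame_rate)      # 75 = ceil(24000 / 320)
print(config.num_quantizers)  # 32 = 1000 * 24.0 // (75 * 10)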
| 134 | 0 |
'''simple docstring'''
def hamming_distance( string_a: str , string_b: str ) -> int:
    if len(string_a ) != len(string_b ):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
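For example, identical strings differ in zero positions, while "karolin" and "kathrin" differ at three:

print(hamming_distance("python", "python"))    # 0
print(hamming_distance("karolin", "kathrin"))  # 3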
| 265 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase_ ( DiffusionPipeline ):
    def __init__( self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ) -> Optional[Any]:
        super().__init__()
        self.register_modules(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def _lowercase( self , A = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def _lowercase( self ) -> Dict:
self.enable_attention_slicing(A )
@torch.no_grad()
def __call__( self , A , A = 512 , A = 512 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , A = None , **A , ) -> List[Any]:
if isinstance(A , A ):
UpperCAmelCase : List[str] = 1
elif isinstance(A , A ):
UpperCAmelCase : Dict = len(A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(A )}.''' )
# get prompt text embeddings
UpperCAmelCase : List[str] = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCAmelCase : List[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCAmelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = text_embeddings.shape
UpperCAmelCase : List[str] = text_embeddings.repeat(1 , A , 1 )
UpperCAmelCase : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase : List[str]
if negative_prompt is None:
UpperCAmelCase : Any = [""""""]
elif type(A ) is not type(A ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='''
f''' {type(A )}.''' )
elif isinstance(A , A ):
UpperCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
UpperCAmelCase : Any = negative_prompt
UpperCAmelCase : Dict = text_input_ids.shape[-1]
UpperCAmelCase : List[Any] = self.tokenizer(
A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
UpperCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase : int = uncond_embeddings.shape[1]
UpperCAmelCase : List[Any] = uncond_embeddings.repeat(A , A , 1 )
UpperCAmelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCAmelCase : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCAmelCase : Dict = torch.randn(
A , generator=A , device="""cpu""" , dtype=A ).to(self.device )
UpperCAmelCase : int = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
UpperCAmelCase : int = torch.randn(
A , generator=A , device=self.device , dtype=A )
UpperCAmelCase : int = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCAmelCase : Optional[Any] = latents_reference.to(self.device )
UpperCAmelCase : Tuple = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCAmelCase : int = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCAmelCase : List[str] = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCAmelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCAmelCase : Optional[int] = 0 if dx < 0 else dx
UpperCAmelCase : List[str] = 0 if dy < 0 else dy
UpperCAmelCase : Union[str, Any] = max(-dx , 0 )
UpperCAmelCase : List[Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
UpperCAmelCase : str = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase : Optional[Any] = {}
if accepts_eta:
UpperCAmelCase : List[str] = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase : str = self.scheduler.scale_model_input(A , A )
# predict the noise residual
UpperCAmelCase : Any = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase : Any = noise_pred.chunk(2 )
UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase : Dict = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
        UpperCAmelCase : Union[str, Any] = 1 / 0.18215 * latents
UpperCAmelCase : Tuple = self.vae.decode(A ).sample
UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCAmelCase : int = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to(
self.device )
UpperCAmelCase , UpperCAmelCase : int = self.safety_checker(
images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCAmelCase : Any = None
if output_type == "pil":
UpperCAmelCase : int = self.numpy_to_pil(A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
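The classifier-free guidance step inside the denoising loop above can be illustrated in isolation: the unconditional and text-conditioned noise predictions are stacked along the batch dimension, split with chunk(2), and recombined using the guidance weight. Shapes and values here are arbitrary:

import torch

noise_pred = torch.randn(2, 4, 64, 64)  # [uncond, text] halves along dim 0
guidance_scale = 7.5
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred.shape)  # torch.Size([1, 4, 64, 64])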
| 265 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
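A quick illustration of the helper above: it returns a list of shape[0] rows, each holding shape[1] random floats scaled into [0, scale):

rows = floats_list((2, 3), scale=2.0, rng=random.Random(0))
print(len(rows), len(rows[0]))  # 2 3
print(all(0.0 <= value < 2.0 for row in rows for value in row))  # True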
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=4_00 , max_seq_length=20_00 , feature_size=24 , num_mel_bins=24 , padding_value=0.0 , sampling_rate=1_60_00 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None
    def setUp( self ):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self )
def a ( self : int , _lowercase : List[Any] ):
self.assertTrue(np.all(np.mean(_lowercase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_lowercase , axis=0 ) - 1 ) < 1E-3 ) )
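    # The helper above asserts utterance-level cepstral mean and variance
    # normalization (CMVN). As a hedged numpy restatement (not part of the test):
    #
    #   normalized = (feats - feats.mean(axis=0)) / np.sqrt(feats.var(axis=0) + 1e-5)
    #
    # which drives each feature dimension to ~zero mean and ~unit variance.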
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
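    # For context, a hedged sketch of producing comparable log-mel filter-bank
    # features directly with torchaudio's Kaldi-compliance API (parameter values are
    # illustrative and may not match this extractor's exact configuration):
    #
    #   import torch
    #   import torchaudio.compliance.kaldi as ta_kaldi
    #   waveform = torch.tensor(input_speech[0]).unsqueeze(0) * (2**15)
    #   fbank = ta_kaldi.fbank(waveform, num_mel_bins=24, sample_frequency=16_000)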
| 360 |
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
_lowercase : str = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
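# The RFC 3526 moduli above are "safe primes" (p = 2q + 1 with q prime), which is
# what makes the pow(key, (p - 1) // 2, p) == 1 subgroup-membership test in the
# class below meaningful. A hedged sanity check, assuming sympy is available:
#
#   from sympy import isprime
#   p = primes[14]["prime"]
#   assert isprime(p) and isprime((p - 1) // 2)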
class DiffieHellman:
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
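
    # A minimal round-trip demo (an illustrative addition, not part of the original
    # script): both parties derive the same shared key from each other's public key.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    assert alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(
        alice.generate_public_key()
    )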
| 86 | 0 |
'''simple docstring'''
# Distinct placeholder names are used for the schedules below; the original variable
# names were lost, and reusing a single name would make each list shadow the last.
timestep_schedule_1 = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
timestep_schedule_2 = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
timestep_schedule_3 = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
timestep_schedule_4 = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
timestep_schedule_5 = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
timestep_schedule_6 = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
timestep_schedule_7 = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
timestep_schedule_8 = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
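# A hedged sketch of how hand-picked timestep subsets like the ones above are
# typically consumed: a denoising loop iterates the list directly instead of an
# evenly spaced range (`unet` and `scheduler` below are illustrative names):
#
#   for t in timestep_schedule_1:
#       noise_pred = unet(latents, t).sample
#       latents = scheduler.step(noise_pred, t, latents).prev_sample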
| 58 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
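    # The invariant exercised above: decoding three new tokens against a populated
    # KV cache must produce the same hidden states as re-running the full,
    # concatenated sequence without a cache, up to ~1e-3 numerical tolerance.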
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
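# A hedged sketch of the two RoPE scaling strategies exercised above, following the
# usual rotary-embedding conventions (helper names are illustrative):
#
#   inv_freq(dim, base) = 1 / base ** (arange(0, dim, 2) / dim)
#
#   linear:  angles = outer(positions / factor, inv_freq)   # always rescaled
#   dynamic: base' = base * ((factor * seq_len / max_pos) - (factor - 1)) ** (dim / (dim - 2))
#            applied only once seq_len exceeds max_pos, which is why the short-input
#            outputs match for "dynamic" but not for "linear" in the test above.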
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is curently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi"""
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 307 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9_500,
        num_object_labels=1_600,
        num_attr_labels=400,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2_048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
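
# Hedged usage sketch (not part of the original file): the per-modality encoder
# depths end up bundled in `num_hidden_layers`.
#
#   config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
#   config.num_hidden_layers  # {"vision": 5, "cross_encoder": 5, "language": 9}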
| 16 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
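# Worked example of the max-over-references reduction above (hypothetical inputs):
#
#   metric_max_over_ground_truths(exact_match_score, "Paris", ["paris", "Lyon"])
#
# scores the prediction against every reference answer and keeps the best score.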
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
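# Worked example of the precision@k bookkeeping above: with k = 2, a hypothesis
# provenance of "A\tB" against a gold provenance of "B\tC" contributes
# len({"B"}) / 2 = 0.5 to the running sum.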
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")

    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
_UpperCamelCase = get_args()
main(args)
| 16 | 1 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with the format `<symbol0> <count0>` per line."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
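# Quick sanity check for rewrite_dict_keys (illustrative, not in the original file);
# the input mirrors the example in the comment above plus the four special tokens:
#
#   src = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7}
#   assert rewrite_dict_keys(src) == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 5, "er</w>": 7}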
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=2))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=2))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, "tokenizer_config.json")
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=2))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict["biogpt." + layer_name] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, "pytorch_model.bin")
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path) | 269 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 125 | 0 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Graph of nodes with per-edge transition probabilities, used as a Markov chain."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        # sample the next node with probability proportional to the edge weights
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Run the chain for `steps` transitions from `start` and count node visits."""
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
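# Illustrative usage (not in the original module): a tiny two-state chain where each
# node's outgoing probabilities sum to 1:
#
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#   visits = get_transitions("a", transitions, 1000)
#   # `visits` is a Counter mapping each node to how often the random walk landed on it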
| 350 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass

        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions

                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
# a short video of eating spaghetti, hosted in the hf-internal-testing dataset repo
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) | 121 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" BERT tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # keep the backend normalizer in sync with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
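# Illustrative usage (not part of this module); requires network access or a local
# copy of the "bert-base-uncased" files:
#
#   tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   enc = tok("hello world", "how are you?")
#   # enc["token_type_ids"] marks the second sentence with 1s, exactly as built by
#   # create_token_type_ids_from_sequences above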
| 40 |
from ..utils import DummyObject, requires_backends


class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 134 | 0 |
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.array, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right, filling the first position with `decoder_start_token_id`."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    # replace any -100 placeholder labels with the pad token id
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
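# Quick illustration of shift_tokens_right (not in the original file):
#
#   ids = jnp.array([[5, 6, 7]])
#   shift_tokens_right(ids, pad_token_id=0, decoder_start_token_id=2)
#   # -> [[2, 5, 6]]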
class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config | 363 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
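# e.g. to_2tuple(7) -> (7, 7), while an iterable such as (224, 224) passes through
# unchanged (illustrative note, not in the original file)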
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def check_model_from_pretrained_configs(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class VisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)) | 306 | 0 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
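    # Example invocation (a sketch; the script file name and the checkpoint/config
    # paths below are placeholders, not files shipped with this module):
    #
    #   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
    #       --pytorch_dump_folder_path ./transfo-xl-pt \
    #       --tf_checkpoint_path ./tf_ckpt/model.ckpt \
    #       --transfo_xl_config_file ./tf_ckpt/config.json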
| 223 |
"""simple docstring"""
# NOTE: the four helpers below originally shared one mangled name with duplicate
# parameters (a syntax error, and later defs shadow earlier ones). The function
# names here are reconstructed from the formula bodies, with R = 0.0821 L*atm/(mol*K).
def normality(moles: float, volume: float, nfactor: int) -> float:
    return round(float(moles / volume) * nfactor)
def pressure_of_gas_system(moles: float, volume: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / volume))
def volume_of_gas_system(moles: float, pressure: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / pressure))
def temperature_of_gas_system(pressure: float, volume: float, moles: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
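# Quick numeric check (added sketch; the helper names above are reconstructed, so
# adjust the calls if yours differ). With n = 2 mol, V = 20 L, T = 300 K:
#   pressure_of_gas_system(2, 20, 300)   -> round(2 * 0.0821 * 300 / 20) == 2 (atm)
#   temperature_of_gas_system(2, 20, 2)  -> round(2 * 20 / (0.0821 * 2)) == 244 (K)
# The roundtrip drifts from 300 K only because each helper rounds to an integer.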
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 86 | 0 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
# NOTE: class and method names below are reconstructed; the dump reused a single
# mangled name for every method, which would make later defs shadow earlier ones.
class LoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self):
        level_origin = logging.get_verbosity()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")
        # restore to the original level
        logging.set_verbosity(level_origin)
    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )
        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()
    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)
        # no need to restore as nothing was changed
    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
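# How the verbosity API exercised above is typically used from application code
# (added sketch):
def _verbosity_demo():
    logging.set_verbosity_info()
    logger = logging.get_logger(__name__)
    logger.info("visible at INFO level and below")
    logging.set_verbosity_warning()  # back to the default threshold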
| 243 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=3, every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
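# Sketch (added): how these callbacks are typically wired together; the metric
# name and output directory are illustrative, not mandated by this module.
def _build_trainer_with_callbacks(output_dir="outputs", metric="rouge2"):
    checkpoint = get_checkpoint_callback(output_dir, metric)
    early_stopping = get_early_stopping_callback(metric, patience=3)
    return pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint, early_stopping])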
| 243 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
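# Minimal usage sketch (added): with the defaults above, the per-modality layer
# counts are packed into `num_hidden_layers` as a dict.
def _lxmert_config_demo():
    config = LxmertConfig()
    return config.num_hidden_layers  # {"vision": 5, "cross_encoder": 5, "language": 9}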
| 16 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
'''simple docstring'''
def __init__( self : List[str] ,**_snake_case : Dict ) -> List[Any]:
"""simple docstring"""
super().__init__(**_snake_case )
requires_backends(self ,'''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
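# Hypothetical end-to-end usage (added sketch): the model id, image path, labels
# and the example scores are illustrative, not taken from this module.
def _zero_shot_demo():
    from transformers import pipeline
    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    return classifier("cat.png", candidate_labels=["cat", "dog"])
    # e.g. [{"score": 0.98, "label": "cat"}, {"score": 0.02, "label": "dog"}]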
| 16 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))
    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 369 |
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
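# Example (added): pass the unknown quantity as 0 and the function returns it,
# e.g. ohms_law(voltage=10, current=0, resistance=5) == {"current": 2.0}.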
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109 | 0 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n"
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n"
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n"
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n"
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n"
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n"
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n"
        run = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n"
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n"
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
    def test_offline_mode_pipeline_exception(self):
        load = "\nfrom transformers import pipeline\n"
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n"
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n"
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )
@require_torch
    def test_offline_mode_dynamic_model(self):
        load = "\nfrom transformers import AutoModel\n"
        run = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n"
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 63 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image
    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params
    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 121 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
# NOTE: the class name below is reconstructed (the `crop_pct` logic matches this
# processor); the mangled original did not preserve it.
class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
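# Usage sketch (added; the class name above was reconstructed and the random
# image is illustrative):
def _image_processor_demo():
    processor = PoolFormerImageProcessor()
    image = (np.random.rand(480, 640, 3) * 255).astype("uint8")
    batch = processor(images=image, return_tensors="np")
    return batch["pixel_values"].shape  # (1, 3, 224, 224) with the defaults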
| 359 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
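    # Example invocation (added sketch; the script file name and both paths are
    # placeholders):
    #
    #   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./vae-diffusers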
| 204 | 0 |
'''simple docstring'''
import math
import sys
# NOTE: variable names and subscripted assignment targets below are reconstructed;
# the dump collapsed every left-hand side into one mangled name.
def read_file_binary(file_path: str) -> str:
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    curr_string, result = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
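# Framing notes (added for clarity): read_file_binary yields the whole file as a
# bit string; remove_prefix strips a header whose length is derived from the run
# of leading zeros, as the slicing above shows; write_file_binary re-packs the
# decompressed bits into 8-bit bytes, padding the last byte with "1" followed by
# zeros so the original bit boundary stays recoverable.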
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 80 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        '''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        '''simple docstring'''
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        '''simple docstring'''
        pass
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        pass
    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        '''simple docstring'''
        pass
    def test_progress_bar(self):
        '''simple docstring'''
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        '''simple docstring'''
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
    def test_two_step_model(self):
        '''simple docstring'''
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
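# A minimal usage sketch of the pipeline exercised above (illustrative only;
# it assumes a CUDA device and the same `damo-vilab/text-to-video-ms-1.7b`
# checkpoint the slow tests download).
def _demo_text_to_video(prompt: str = "Spiderman is surfing"):
    pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
    pipe = pipe.to("cuda")
    video_frames = pipe(prompt, num_inference_steps=25).frames
    return video_frames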
| 306 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size_divisor=32, do_rescale=True):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 79 |
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _SCREAMING_SNAKE_CASE ( ) ->Optional[Any]:
'''simple docstring'''
a : int = HfArgumentParser(_lowercase )
a : int = parser.parse_args_into_dataclasses()[0]
a : Any = TensorFlowBenchmark(args=_lowercase )
try:
a : Tuple = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
a : Optional[Any] = "Arg --no_{0} is no longer used, please use --no-{0} instead."
a : Tuple = " ".join(str(_lowercase ).split(" " )[:-1] )
a : Any = ""
a : Any = eval(str(_lowercase ).split(" " )[-1] )
a : List[Any] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(_lowercase )
if len(_lowercase ) > 0:
a : Tuple = full_error_msg + begin_error_msg + str(_lowercase )
raise ValueError(_lowercase )
benchmark.run()
if __name__ == "__main__":
main()
| 79 | 1 |
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def UpperCamelCase ( UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
a_ = int(UpperCAmelCase )
a_ , a_ , a_ = t // 3_600, (t // 60) % 60, t % 60
return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=300 ) ->str:
"""simple docstring"""
return F'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def UpperCamelCase ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
a_ = "<table border=\"1\" class=\"dataframe\">\n"
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
a_ = F'''{elt:.6f}''' if isinstance(UpperCAmelCase , UpperCAmelCase ) else str(UpperCAmelCase )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2
    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value, force_update=False, comment=None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=None) ->Dict:
a_ = " " * (len(str(self.total)) - len(str(__UpperCAmelCase))) + str(__UpperCAmelCase)
if self.elapsed_time is None:
a_ = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
a_ = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'''
else:
a_ = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'''
F''' {format_time(self.predicted_remaining)}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment) == 0 else F''', {self.comment}]'''
self.display()
def UpperCAmelCase__ ( self) ->Dict:
a_ = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
a_ = disp.display(disp.HTML(self.html_code) , display_id=__UpperCAmelCase)
else:
self.output.update(disp.HTML(self.html_code))
def UpperCAmelCase__ ( self) ->Optional[Any]:
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))
    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])
    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar
    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)
    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1, comment=f"Epoch {epoch}/{state.num_train_epochs}", force_update=self._force_next_update)
        self._force_next_update = False
    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)
    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)
    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step, comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}", force_update=True)
        self.training_tracker = None
| 243 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs
    def __init__(self, repo_info=None, token=None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    })
    def _open(self, path, mode="rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url, mode=mode, headers=get_authentication_headers_for_url(url, use_auth_token=self.token), client_kwargs={"trust_env": True}).open()
    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)
    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 243 | 1 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available('google.colab')
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    '''simple docstring'''
    def __init__(self, prompt=None, choices=[]):
        '''simple docstring'''
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '*'
        else:
            self.arrow_char = '➔ '
def _UpperCamelCase ( self , snake_case_ , snake_case_ = "" ):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index] , 3_2 , _SCREAMING_SNAKE_CASE )
else:
forceWrite(self.choices[index] , _SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self , snake_case_ ):
'''simple docstring'''
if index == self.position:
forceWrite(F''' {self.arrow_char} ''' )
self.write_choice(_SCREAMING_SNAKE_CASE )
else:
forceWrite(F''' {self.choices[index]}''' )
reset_cursor()
def _UpperCamelCase ( self , snake_case_ , snake_case_ = 1 ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(_SCREAMING_SNAKE_CASE )
move_cursor(_SCREAMING_SNAKE_CASE , direction.name )
self.print_choice(self.position )
    @input.mark(KEYMAP['up'])
    def move_up(self):
        '''simple docstring'''
        self.move_direction(Direction.UP)
    @input.mark(KEYMAP['down'])
    def move_down(self):
        '''simple docstring'''
        self.move_direction(Direction.DOWN)
    @input.mark(KEYMAP['newline'])
    def select(self):
        '''simple docstring'''
        move_cursor(len(self.choices) - self.position, 'DOWN')
        return self.position
    @input.mark(KEYMAP['interrupt'])
    def interrupt(self):
        '''simple docstring'''
        move_cursor(len(self.choices) - self.position, 'DOWN')
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        '''simple docstring'''
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice: int = 0):
        '''simple docstring'''
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, '\n')
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter', '\n')
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter', '\n')
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite('\n')
        move_cursor(len(self.choices) - self.position, 'UP')
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, 'UP')
                        clear_line()
                    self.write_choice(choice, '\n')
                    return choice
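# A minimal usage sketch for the menu above (illustrative only; the arrow-key
# loop needs an interactive terminal, or Colab's numeric-input fallback).
def _demo_menu() -> int:
    menu = BulletMenu("Pick a device:", ["cpu", "cuda", "mps"])
    return menu.run(default_choice=0)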
| 371 |
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    '''simple docstring'''
    def _info(self):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('int32')),
                    'references': datasets.Sequence(datasets.Value('int32')),
                }
                if self.config_name == 'multilabel'
                else {
                    'predictions': datasets.Value('int32'),
                    'references': datasets.Value('int32'),
                }), reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'])
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        '''simple docstring'''
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
        return {"f1": float(score) if score.size == 1 else score}
| 274 | 0 |
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = None ) -> list[list[str]]:
lowercase__ : List[str] = word_bank or []
# create a table
lowercase__ : int = len(__lowerCamelCase ) + 1
lowercase__ : list[list[list[str]]] = []
for _ in range(__lowerCamelCase ):
table.append([] )
# seed value
lowercase__ : Union[str, Any] = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(__lowerCamelCase ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(__lowerCamelCase )] == word:
lowercase__ : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(__lowerCamelCase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(__lowerCamelCase )]:
combination.reverse()
return table[len(__lowerCamelCase )]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 109 | 0 |
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None
    def __post_init__(self):
        """simple docstring"""
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)
    def __repr__(self):
        """simple docstring"""
        return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
    @property
    def tuple(self):
        """simple docstring"""
        return self.major, self.minor, self.patch
    def _validate_operand(self, other):
        """simple docstring"""
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f'''{other} (type {type(other)}) cannot be compared to version.''')
    def __eq__(self, other):
        """simple docstring"""
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple
    def __lt__(self, other):
        """simple docstring"""
        other = self._validate_operand(other)
        return self.tuple < other.tuple
    def __hash__(self):
        """simple docstring"""
        return hash(_version_tuple_to_str(self.tuple))
    @classmethod
    def from_dict(cls, dic):
        """simple docstring"""
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})
    def _to_yaml_string(self) -> str:
        """simple docstring"""
        return self.version_str
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = _VERSION_REG.match(_lowerCamelCase )
if not res:
raise ValueError(F'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
return tuple(int(_lowerCamelCase ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] )
def _a ( _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
return ".".join(str(_lowerCamelCase ) for v in version_tuple )
| 13 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : str = 0
__snake_case : Optional[int] = len(_lowerCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , _lowerCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _a ( _lowerCamelCase ) -> Tuple:
"""simple docstring"""
if len(_lowerCamelCase ) <= 1:
return arr, 0
__snake_case : Any = len(_lowerCamelCase ) // 2
__snake_case : List[str] = arr[0:mid]
__snake_case : int = arr[mid:]
__snake_case , __snake_case : List[Any] = count_inversions_recursive(_lowerCamelCase )
__snake_case , __snake_case : Tuple = count_inversions_recursive(_lowerCamelCase )
__snake_case , __snake_case : str = _count_cross_inversions(_lowerCamelCase , _lowerCamelCase )
__snake_case : str = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _a ( _lowerCamelCase , _lowerCamelCase ) -> int:
"""simple docstring"""
__snake_case : Any = []
__snake_case : List[str] = 0
while i < len(_lowerCamelCase ) and j < len(_lowerCamelCase ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(_lowerCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(_lowerCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def _a ( ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[Any] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
__snake_case : Optional[Any] = count_inversions_bf(_lowerCamelCase )
__snake_case , __snake_case : Union[str, Any] = count_inversions_recursive(_lowerCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print("""number of inversions = """ , _lowerCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
__snake_case : Any = count_inversions_bf(_lowerCamelCase )
__snake_case , __snake_case : Union[str, Any] = count_inversions_recursive(_lowerCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , _lowerCamelCase )
# an empty list should also have zero inversions
__snake_case : List[Any] = []
__snake_case : List[Any] = count_inversions_bf(_lowerCamelCase )
__snake_case , __snake_case : List[Any] = count_inversions_recursive(_lowerCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , _lowerCamelCase )
if __name__ == "__main__":
main()
| 13 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 27 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    '''simple docstring'''
    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.', FutureWarning)
        super().__init__(*args, **kwargs)
| 204 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class FalconConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''falcon'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__(self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0, attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False, multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11, **kwargs):
        '''simple docstring'''
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed', None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def head_dim(self):
        '''simple docstring'''
        return self.hidden_size // self.num_attention_heads
    @property
    def rotary(self):
        '''simple docstring'''
        return not self.alibi
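# A short usage sketch for the config above (illustrative only): the defaults
# mirror the falcon-7b-style sizes, so `head_dim` is 4544 // 71 == 64 and
# `rotary` is True whenever `alibi` is disabled.
def _demo_falcon_config() -> None:
    config = FalconConfig()
    print(config.head_dim, config.rotary)  # 64 True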
| 356 |
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    '''simple docstring'''
    order = 1
    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas=None):
        '''simple docstring'''
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps(self, num_inference_steps: int, device=None):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        self.timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = self.timesteps.to(device)
        self.ets = []
    def step(self, model_output, timestep, sample, return_dict=True):
        '''simple docstring'''
        if self.num_inference_steps is None:
            raise ValueError(
                'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler')
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        # linear multistep (Adams-Bashforth) combination of the running values
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def scale_model_input(self, sample, *args, **kwargs):
        '''simple docstring'''
        return sample
    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        '''simple docstring'''
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
    def __len__(self):
        '''simple docstring'''
        return self.config.num_train_timesteps
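# A tiny smoke-test sketch for the scheduler above (illustrative only): one
# denoising step on random tensors standing in for a real model output.
def _demo_ipndm_step() -> None:
    scheduler = IPNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(4)
    sample = torch.randn(1, 3, 8, 8)
    model_output = torch.randn(1, 3, 8, 8)
    out = scheduler.step(model_output, scheduler.timesteps[0], sample)
    print(out.prev_sample.shape)  # torch.Size([1, 3, 8, 8])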
| 274 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 79 |
'''simple docstring'''
from typing import List
import numpy as np
def __lowercase ( __lowercase ) -> int:
'''simple docstring'''
_A = {key: len(__lowercase ) for key, value in gen_kwargs.items() if isinstance(__lowercase , __lowercase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
) )
_A = max(lists_lengths.values() , default=0 )
return max(1 , __lowercase )
def __lowercase ( __lowercase , __lowercase ) -> List[range]:
'''simple docstring'''
_A = []
for group_idx in range(__lowercase ):
_A = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_A = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_A = range(__lowercase , start + num_shards_to_add )
shards_indices_per_group.append(__lowercase )
return shards_indices_per_group
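# A quick sketch of the distribution rule above (illustrative only): the
# remainder shards go to the first groups, so 10 shards over at most 3 jobs
# gives group sizes 4, 3, 3.
def _demo_distribute_shards() -> None:
    assert _distribute_shards(num_shards=10, max_num_jobs=3) == [range(0, 4), range(4, 7), range(7, 10)]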
def __lowercase ( __lowercase , __lowercase ) -> List[dict]:
'''simple docstring'''
_A = _number_of_shards_in_gen_kwargs(__lowercase )
if num_shards == 1:
return [dict(__lowercase )]
else:
_A = _distribute_shards(num_shards=__lowercase , max_num_jobs=__lowercase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(__lowercase , __lowercase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(__lowercase ) )
]
def __lowercase ( __lowercase ) -> dict:
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , __lowercase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __lowercase ( __lowercase , __lowercase ) -> dict:
'''simple docstring'''
_A = {len(__lowercase ) for value in gen_kwargs.values() if isinstance(__lowercase , __lowercase )}
_A = {}
for size in list_sizes:
_A = list(range(__lowercase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
_A = dict(__lowercase )
for key, value in shuffled_kwargs.items():
if isinstance(__lowercase , __lowercase ):
_A = [value[i] for i in indices_per_size[len(__lowercase )]]
return shuffled_kwargs
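# An end-to-end sketch (illustrative only): splitting sharded kwargs into
# per-job kwargs and merging them back is a lossless round trip.
def _demo_split_merge() -> None:
    gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt"], "split": "train"}
    jobs = _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
    assert jobs == [
        {"files": ["a.txt", "b.txt"], "split": "train"},
        {"files": ["c.txt"], "split": "train"},
    ]
    assert merge_gen_kwargs(jobs) == gen_kwargs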
| 79 | 1 |
'''simple docstring'''
import string
def _lowerCamelCase ( lowercase : str ) -> Union[str, Any]:
for key in range(len(string.ascii_uppercase ) ):
_a = """"""
for symbol in message:
if symbol in string.ascii_uppercase:
_a = string.ascii_uppercase.find(A__ )
_a = num - key
if num < 0:
_a = num + len(string.ascii_uppercase )
_a = translated + string.ascii_uppercase[num]
else:
_a = translated + symbol
print(F'Decryption using Key #{key}: {translated}' )
def _lowerCamelCase ( ) -> str:
_a = input("Encrypted message: " )
_a = message.upper()
decrypt(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 351 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv2ImageProcessor'
    tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning)
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f' {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}')
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning)
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning)
        return self.image_processor
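# A minimal usage sketch for the processor above (illustrative only; it
# assumes the `microsoft/layoutxlm-base` checkpoint and a document image on
# disk, with pytesseract available for the built-in OCR).
def _demo_processor(image_path: str) -> BatchEncoding:
    from PIL import Image
    processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
    image = Image.open(image_path).convert("RGB")
    return processor(image, return_tensors="pt")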
| 346 | 0 |
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig:
    '''simple docstring'''
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 58 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input('''Enter image url: ''').strip()
    print(F'''Downloading image from {url} ...''')
    soup = BeautifulSoup(requests.get(url).content, '''html.parser''')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
    image_data = requests.get(image_url).content
    file_name = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
    with open(file_name, '''wb''') as fp:
        fp.write(image_data)
    print(F'''Done. Image saved to disk as {file_name}.''')
| 274 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 361 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self , text: str , pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)
    def match_in_pattern(self , char: str) -> int:
        # index of the rightmost occurrence of `char` in the pattern, or -1
        for i in range(self.patLen - 1 , -1 , -1):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text(self , current_pos: int) -> int:
        # rightmost mismatch position for the window starting at current_pos, or -1 on a full match
        for i in range(self.patLen - 1 , -1 , -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic(self) -> list:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = 'ABAABA'
pattern = 'AB'
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
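# For this demo the search reports every alignment where the pattern matches:
# 'AB' occurs in 'ABAABA' at indices 0 and 3, so the script prints [0, 3].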
| 57 | 0 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class Version:
    """Dataset version identifier MAJOR.MINOR.PATCH."""
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None
    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)
    def __repr__(self):
        return F"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
    @property
    def tuple(self):
        return self.major, self.minor, self.patch
    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(F"{other} (type {type(other)}) cannot be compared to version.")
    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple
    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple
    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))
    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})
    def _to_yaml_string(self) -> str:
        return self.version_str
def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(F"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])
def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
| 13 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , encoder_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model(self , config , pixel_values , labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self , config , pixel_values , labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self , config , pixel_values , labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4))
    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480)
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values , interpolate_pos_encoding=True)
        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape)
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.float16 , device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
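# A minimal standalone sketch of the classification check above (assumes hub
# access; image path is a placeholder, not part of the original test file):
#     from transformers import ViTForImageClassification, ViTImageProcessor
#     from PIL import Image
#     import torch
#     model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
#     processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
#     inputs = processor(images=Image.open("cats.png"), return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits  # shape (1, 1000)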
| 13 | 1 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path) -> None:
    """simple docstring"""
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.66_4694
        config.cell_selection_preference = 0.20_7951
        config.huber_loss_delta = 0.12_1194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.035_2513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.90_3421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.76_3141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(f'Task {task} not supported.' )
    print(f'Building PyTorch model from configuration: {config}' )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(f'Save tokenizer files to {pytorch_dump_path}' )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
    )
| 367 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer ):
    '''simple docstring'''
    def __init__( self , vocab_size , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False , **kwargs ) -> None:
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []
    def build( self , input_shape ) -> None:
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=True , name="""cluster_weight""" )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,) , initializer="""zeros""" , trainable=True , name="""cluster_bias""" )
        if self.div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=True , name=f'out_projs_._{i}' , )
                    self.out_projs.append(weight )
                else:
                    self.out_projs.append(None )
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=True , name=f'out_layers_._{i}_._weight' , )
                bias = self.add_weight(
                    shape=(self.vocab_size,) , initializer="""zeros""" , trainable=True , name=f'out_layers_._{i}_._bias' , )
                self.out_layers.append((weight, bias) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=True , name=f'out_projs_._{i}' )
                self.out_projs.append(weight )
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=True , name=f'out_layers_._{i}_._weight' , )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=True , name=f'out_layers_._{i}_._bias' , )
                self.out_layers.append((weight, bias) )
        super().build(input_shape )
    @staticmethod
    def _logit( x , W , b , proj=None ):
        y = x
        if proj is not None:
            y = tf.einsum("""ibd,ed->ibe""" , y , proj )
        return tf.einsum("""ibd,nd->ibn""" , y , W ) + b
    @staticmethod
    def _gather_logprob( logprob , target ):
        lp_size = shape_list(logprob )
        r = tf.range(lp_size[0] , dtype=target.dtype )
        idx = tf.stack([r, target] , 1 )
        return tf.gather_nd(logprob , idx )
    def call( self , hidden , target , return_mean=True , training=False ):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target , logits=output )
            out = tf.nn.log_softmax(output , axis=-1 )
        else:
            hidden_sizes = shape_list(hidden )
            out = []
            loss = tf.zeros(hidden_sizes[:2] )
            for i in range(len(self.cutoffs ) ):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask )
                    cur_target = tf.boolean_mask(target , mask ) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight] , 0 )
                    cur_b = tf.concat([cur_b, self.cluster_bias] , 0 )
                    head_logit = self._logit(hidden , cur_W , cur_b , self.out_projs[0] )
                    head_logprob = tf.nn.log_softmax(head_logit )
                    out.append(head_logprob[..., : self.cutoffs[0]] )
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob , mask )
                        cur_logprob = self._gather_logprob(cur_head_logprob , cur_target )
                else:
                    tail_logit = self._logit(hidden , cur_W , cur_b , self.out_projs[i] )
                    tail_logprob = tf.nn.log_softmax(tail_logit )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i )
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob , mask )
                        cur_tail_logprob = tf.boolean_mask(tail_logprob , mask )
                        cur_logprob = self._gather_logprob(cur_tail_logprob , cur_target )
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx , -cur_logprob , shape_list(loss ) )
            out = tf.concat(out , axis=-1 )
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss )
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss , name=self.name , aggregation="""mean""" if return_mean else """""" )
        return out
| 187 | 0 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int , remainder: int , digits: list , length: int) -> int:
    """Count reversible numbers of the given length for the digits chosen so far."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1 , -1 , -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 10 , digits , length)
        return result
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digit1 + digit2) // 10 , digits , length , )
    return result
def solution(max_power: int = 9) -> int:
    """Count every reversible number below 10**max_power."""
    result = 0
    for length in range(1 , max_power + 1):
        result += reversible_numbers(length , 0 , [0] * length , length)
    return result
if __name__ == "__main__":
print(f'{solution() = }')
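# With the default max_power=9 this counts every reversible number below 10**9;
# for Project Euler 145 the printed value should be 608720.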
| 184 |
def prefix_function(input_string: str) -> list:
    """KMP prefix function: for every position, the length of the longest proper
    prefix of the string that is also a suffix ending at that position."""
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_str: str) -> int:
    """Length of the longest prefix that re-occurs later in the string."""
    return max(prefix_function(input_str ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
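# Worked example (illustrative): the prefix function of "aabcdaabc" is
# [0, 1, 0, 0, 0, 1, 2, 3, 4], so longest_prefix("aabcdaabc") == 4.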
| 274 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class AudioClassification(TaskTemplate ):
    task: str = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"audio": Audio()} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features(self , features ) ->"AudioClassification":
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(F"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping(self ) ->Dict[str, str]:
        '''simple docstring'''
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
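# A short usage sketch (label names are illustrative, not part of this module):
#     from datasets import Audio, ClassLabel, Features
#     task = AudioClassification(audio_column="audio", label_column="labels")
#     features = Features({"audio": Audio(), "labels": ClassLabel(names=["no", "yes"])})
#     task = task.align_with_features(features)  # copies the label names into the schema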
| 353 |
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric ):
    def _info(self ) ->datasets.MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
    def _download_and_prepare(self , dl_manager ) ->None:
'''simple docstring'''
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
    def _compute(self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5 ) ->dict:
        '''simple docstring'''
        if NLTK_VERSION >= version.Version("3.6.5" ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        return {"meteor": np.mean(scores )}
| 322 | 0 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(model , bnb_quantization_config , weights_location=None , device_map=None , no_split_module_classes=None , max_memory=None , offload_folder=None , offload_state_dict=False , ):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            'make sure you have the latest version of `bitsandbytes` installed.' )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            'It is not recommended to quantize a loaded model. '
            'The model should be instantiated under the `init_empty_weights` context manager.' )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    name = name.replace('.weight' , '' ).replace('.bias' , '' )
                    param = getattr(model , name , None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(dtype )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info(
            f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            'We move the model to cuda.' )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
        load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
        return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
def get_quantized_model_device_map(model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
    if isinstance(device_map , str ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
                '\'sequential\'.' )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
            } )
        kwargs = {}
        kwargs['special_dtypes'] = special_dtypes
        kwargs['no_split_module_classes'] = no_split_module_classes
        kwargs['dtype'] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == 'balanced_low_0') , max_memory=max_memory , **kwargs , )
        kwargs['max_memory'] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
                else:
                    logger.info(
                        'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None):
    """Replace every `nn.Linear` in `model` (except skipped modules) with a bitsandbytes quantized layer."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.' )
    return model
def _replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '.'.join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , 'base_model_prefix' ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '' )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers(model):
    # Check whether the model contains any `bnb.nn.Linear4bit` layers
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters() ).device
def quantize_and_offload(model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('.' )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""" )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , 'SCB' ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace('weight' , 'SCB' ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , 'meta' , dtype=new_dtype , value=torch.empty(*param.size() ) )
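# A minimal usage sketch of `load_and_quantize_model` (model class and weights
# path are placeholders, not part of this module):
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig
#     with init_empty_weights():
#         model = MyModel()  # hypothetical architecture built on the meta device
#     config = BnbQuantizationConfig(load_in_8bit=True)
#     model = load_and_quantize_model(model, config, weights_location="/path/to/weights")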
| 89 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
    }
}
class CpmTokenizer(PreTrainedTokenizer ):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
                """See https://pypi.org/project/jieba/ for installation.""" )
        self.jieba = jieba
        self.translator = str.maketrans(""" \n""" , """\u2582\u2583""" )
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size( self ):
        """simple docstring"""
        return len(self.sp_model )
    def get_vocab( self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        """simple docstring"""
        if self.remove_space:
            outputs = """ """.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("""NFKD""" , outputs )
            outputs = """""".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text: str ):
        """simple docstring"""
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , """""" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def _decode( self , *args , **kwargs ):
        """simple docstring"""
        text = super()._decode(*args , **kwargs )
        text = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
        return text
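# A brief usage sketch (network access to the hub is assumed):
#     tok = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#     ids = tok("你好 世界")["input_ids"]
#     text = tok.decode(ids)  # _decode restores spaces/newlines from the ▂/▃ placeholders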
| 346 | 0 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 100_0000 , n_limit: int = 10) -> int:
    """Count the tile totals t <= t_limit that can form between 1 and n_limit hollow square laminae."""
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
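# Each key of `count` is a tile total t = outer**2 - hole**2; the returned value
# is how many t <= t_limit admit between 1 and n_limit distinct laminae (Project Euler 174).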
if __name__ == "__main__":
print(F'{solution() = }')
| 339 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main() -> None:
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/transformers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 339 | 1 |
def is_palindrome(num: int) -> bool:
    """Return True if the decimal digits of num read the same forwards and backwards."""
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
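# Worked example: for num=121 rev_num builds 1 -> 12 -> 121, so the function
# returns True; for num=123 it builds 321 != 123 and returns False.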
if __name__ == "__main__":
import doctest
doctest.testmod()
| 123 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 359 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 92 | 0 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
A : List[str] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 57 |
import functools
def minimum_tickets_cost(days: list[int], costs: list[int]) -> int:
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
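# Example (added for illustration): with travel days [1, 4, 6, 7, 8, 20] and
# pass costs [2, 7, 15] (1-day, 7-day, 30-day), the optimum is a 1-day pass on
# day 1, a 7-day pass covering days 4-10, and a 1-day pass on day 20:
# minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11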
if __name__ == "__main__":
import doctest
doctest.testmod()
| 187 | 0 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 354 |
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
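# Note on the response computed above (added explanation): for the 2x2 structure
# tensor M = [[wxx, wxy], [wxy, wyy]] of each window, the Harris score is
#     r = det(M) - k * trace(M)**2 = (wxx * wyy - wxy**2) - k * (wxx + wyy)**2,
# and pixels whose score exceeds the threshold are recorded as corners.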
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 81 | 0 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode, use_xla):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size, sequence_length, vocab_size):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):

    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)
    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)
    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)
    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 25 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 322 | 0 |
"""simple docstring"""
def prefix_function(input_string: str) -> list:
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    return max(prefix_function(input_str))
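# Added example: prefix_function("abcabcd") == [0, 0, 0, 1, 2, 3, 0], so
# longest_prefix("abcabcd") == 3 (the proper prefix "abc" also ends at index 5).
assert prefix_function("abcabcd") == [0, 0, 0, 1, 2, 3, 0]
assert longest_prefix("abcabcd") == 3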
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 1 | 0 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 339 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 268 | """simple docstring"""
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/",
}  # Exclamation mark is not in ITU-R recommendation
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
| 268 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
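# Hedged usage sketch (added; the checkpoint name below is an assumption and is
# not verified by this snippet):
#
#   from PIL import Image
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   low_res = Image.open("low_res.png").convert("RGB").resize((128, 128))
#   upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
#   upscaled.save("upscaled.png")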
| 51 |
def solution(limit: int = 1_000_000) -> int:
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
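# Added check: phi(2)..phi(8) = 1, 2, 2, 4, 2, 6, 4, which sums to 21 -- the
# number of reduced proper fractions with denominator <= 8 (Project Euler 72's
# worked example), so solution(8) == 21.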
if __name__ == "__main__":
print(solution())
| 92 | 0 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 260 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 260 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
| 181 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):

    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 81 | 0 |
import pytest
DATASET_LOADING_SCRIPT_NAME = '''__dummy_dataset1__'''

DATASET_LOADING_SCRIPT_CODE = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 19 |
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
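# Added example: 111111 = 7 * 15873 and no shorter repunit is divisible by 7,
# so least_divisible_repunit(7) == 6.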
if __name__ == "__main__":
print(F"{solution() = }")
| 19 | 1 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    '''simple docstring'''
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    '''simple docstring'''
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    '''simple docstring'''
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    '''simple docstring'''
    pass  # Put your code here...


def random_letters(chars_incl, i):
    '''simple docstring'''
    pass  # Put your code here...


def random_characters(chars_incl, i):
    '''simple docstring'''
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    '''simple docstring'''
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase
    # numbers, and special characters


def main():
    '''simple docstring'''
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:", alternative_password_generator(chars_incl, max_length))
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
| 108 | '''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor):
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1):
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
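

if __name__ == "__main__":
    # Illustrative usage (an added sketch, not part of the original module); the
    # feature size and batch shape below are arbitrary assumptions.
    studentt_output = StudentTOutput(dim=1)
    projection = studentt_output.get_parameter_projection(in_features=32)
    distr_args = projection(torch.randn(8, 32))  # (df, loc, scale), each of shape (8,)
    distribution = studentt_output.distribution(distr_args)
    print(distribution.sample().shape)  # torch.Size([8])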
| 1 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        '''simple docstring'''
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        '''simple docstring'''
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        '''simple docstring'''
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        '''simple docstring'''
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        '''simple docstring'''
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def add_noise(self, original_samples, noise, timesteps):
        '''simple docstring'''
        raise NotImplementedError()
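

if __name__ == "__main__":
    # Minimal smoke test (an added sketch, runnable e.g. via ``python -m`` inside
    # the package); the zero "model output" below is a placeholder assumption
    # standing in for a real denoiser network.
    scheduler = KarrasVeScheduler()
    scheduler.set_timesteps(num_inference_steps=10)
    noisy = torch.randn(1, 3, 8, 8) * scheduler.config.sigma_max
    sample_hat, sigma_hat = scheduler.add_noise_to_input(noisy, sigma=1.0)
    output = scheduler.step(torch.zeros_like(sample_hat), sigma_hat, 0.5, sample_hat)
    print(output.prev_sample.shape)  # torch.Size([1, 3, 8, 8])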
| 353 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        '''simple docstring'''
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        '''simple docstring'''
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        '''simple docstring'''
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive).")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        '''simple docstring'''
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
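
# Example usage (an added sketch; "microsoft/vq-diffusion-ithq" is the checkpoint
# this pipeline is usually demonstrated with, but treat the id as an assumption):
#
#   pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipeline("teddy bear playing in the pool").images[0]
#   image.save("teddy_bear.png")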
| 242 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
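
# Illustration (added; not part of the original module): inspecting the ONNX
# input spec a GPT-J export advertises, e.g. from a Python shell with
# transformers installed:
#
#   from transformers import GPTJConfig
#   from transformers.models.gptj.configuration_gptj import GPTJOnnxConfig
#   onnx_config = GPTJOnnxConfig(GPTJConfig(), use_past=True)
#   print(dict(onnx_config.inputs))  # input_ids, past_key_values.*.{key,value}, attention_mask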
| 268 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
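
# Illustrative invocation (added; assumes a transformers dev checkout with the
# usual test dependencies installed):
#   pytest tests/models/deberta/test_modeling_deberta.py -k "deberta_model or integration"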
| 268 | 1 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        """simple docstring"""
        return 12
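
# Illustration (added): inspecting the export contract from a shell, assuming a
# transformers installation:
#
#   from transformers import SegformerConfig
#   from transformers.models.segformer.configuration_segformer import SegformerOnnxConfig
#   onnx_config = SegformerOnnxConfig(SegformerConfig())
#   dict(onnx_config.inputs)   # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
#   onnx_config.default_onnx_opset  # 12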
| 363 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """simple docstring"""
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    """simple docstring"""
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
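
# Example invocation (added sketch; file names below are placeholders):
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path mluke/pytorch_model.bin \
#       --metadata_path mluke/metadata.json \
#       --entity_vocab_path mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path converted_mluke \
#       --model_size base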
| 249 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
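
# Note (added): the slow test above needs a CUDA device and downloads
# "cerspense/zeroscope_v2_XL". With the usual RUN_SLOW convention it would be run as
#   RUN_SLOW=1 pytest tests/pipelines/text_to_video/ -k two_step
# (the test path is an approximation of the diffusers layout).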
| 260 |
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000):
    '''simple docstring'''
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
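
# Worked example (added): at a 16 kHz sample rate with max_length=2.0 the crop is
# 32000 samples, so random_subsample(np.zeros(48000), 2.0) returns a random
# 2-second window; a clip shorter than 2 seconds is returned unchanged.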
@dataclass
class DataTrainingArguments:
    """simple docstring"""

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."})
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."})
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"})
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = field(
default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""})
UpperCamelCase__ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def lowercase__ ( self : Optional[Any] )->int:
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder` '''
'''instead. Setting `freeze_feature_encoder==True`.''' , __UpperCamelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`. '''
'''Only make use of `--freeze_feature_encoder`.''' )
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bit training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
_UpperCAmelCase = DatasetDict()
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
f'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'''Make sure to set `--label_column_name` to the correct label column - one of '''
f'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_UpperCAmelCase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_UpperCAmelCase = feature_extractor.model_input_names[0]
def train_transforms(_SCREAMING_SNAKE_CASE : Tuple ):
_UpperCAmelCase = []
for audio in batch[data_args.audio_column_name]:
_UpperCAmelCase = random_subsample(
audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate )
_UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )}
_UpperCAmelCase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_SCREAMING_SNAKE_CASE : Optional[int] ):
_UpperCAmelCase = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
_UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate )
_UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )}
_UpperCAmelCase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_UpperCAmelCase = raw_datasets['''train'''].features[data_args.label_column_name].names
_UpperCAmelCase , _UpperCAmelCase = {}, {}
for i, label in enumerate(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = str(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = label
# Load the accuracy metric from the datasets package
_UpperCAmelCase = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_SCREAMING_SNAKE_CASE : List[str] ):
_UpperCAmelCase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=eval_pred.label_ids )
_UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_SCREAMING_SNAKE_CASE ) , labelaid=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
_UpperCAmelCase = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_UpperCAmelCase = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE )
# Initialize our trainer
_UpperCAmelCase = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
_UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_UpperCAmelCase = trainer.evaluate()
trainer.log_metrics('''eval''' , _SCREAMING_SNAKE_CASE )
trainer.save_metrics('''eval''' , _SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
_UpperCAmelCase = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 260 | 1 |
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def UpperCAmelCase_ ( __lowercase : str ) -> str:
'''simple docstring'''
def wrapper(*__lowercase : Optional[int] , **__lowercase : Any ):
_UpperCAmelCase = timeit.default_timer()
_UpperCAmelCase = func(*__lowercase , **__lowercase )
_UpperCAmelCase = timeit.default_timer() - starttime
return delta
_UpperCAmelCase = func.__name__
return wrapper
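# A minimal usage sketch for the timing decorator above (its real name is
# hidden behind the reused placeholder `UpperCAmelCase_`, so the decorator is
# passed in explicitly here). Note that the wrapper discards the wrapped
# function's return value and returns the elapsed wall-clock time instead.
def _timing_decorator_demo(get_duration ):
timed_sum = get_duration(lambda n: sum(range(n ) ) )
return timed_sum(1_000_000 ) # elapsed seconds, not the sum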
def UpperCAmelCase_ ( __lowercase : dict , __lowercase : List[Any]=100 , __lowercase : Union[str, Any]=None ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = seq_shapes or {}
for i in range(__lowercase ):
_UpperCAmelCase = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(__lowercase , _ArrayXD ):
_UpperCAmelCase = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(__lowercase , datasets.Value ):
if v.dtype == "string":
_UpperCAmelCase = "The small grey turtle was surprisingly fast when challenged."
else:
_UpperCAmelCase = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(__lowercase , datasets.Sequence ):
while isinstance(__lowercase , datasets.Sequence ):
_UpperCAmelCase = v.feature
_UpperCAmelCase = seq_shapes[k]
_UpperCAmelCase = np.random.rand(*__lowercase ).astype(v.dtype )
_UpperCAmelCase = data
dummy_data.append((i, example) )
return dummy_data
def UpperCAmelCase_ ( __lowercase : List[str] , __lowercase : List[Any] , __lowercase : str=100 , __lowercase : Union[str, Any]=None ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = generate_examples(__lowercase , num_examples=__lowercase , seq_shapes=__lowercase )
with ArrowWriter(features=__lowercase , path=__lowercase ) as writer:
for key, record in dummy_data:
_UpperCAmelCase = features.encode_example(__lowercase )
writer.write(__lowercase )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
if num_final_examples != num_examples:
raise ValueError(
f'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
_UpperCAmelCase = datasets.Dataset.from_file(filename=__lowercase , info=datasets.DatasetInfo(features=__lowercase ) )
return dataset
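# A hypothetical usage sketch for the two benchmark helpers above (the
# placeholder identifiers hide their real names, so the writer helper is
# passed in explicitly, and the positional order `(dataset_path, features,
# ...)` is an assumption, not taken from this file):
def _benchmark_dataset_demo(dataset_writer ):
features = datasets.Features({"text": datasets.Value("string" ), "score": datasets.Value("int64" )} )
return dataset_writer("/tmp/bench.arrow" , features , num_examples=10 ) # assumed argument order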
| 156 |
'''simple docstring'''
def UpperCAmelCase_ ( __lowercase : list ) -> list:
'''simple docstring'''
_UpperCAmelCase = False
while is_sorted is False: # keep looping until a full pass over both parities makes no swap
_UpperCAmelCase = True
for i in range(0 , len(__lowercase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
_UpperCAmelCase , _UpperCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
_UpperCAmelCase = False
for i in range(1 , len(__lowercase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
_UpperCAmelCase , _UpperCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
_UpperCAmelCase = False
return input_list
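# Note on the algorithm above: odd-even (brick) sort is a bubble-sort variant
# that alternates compare-and-swap passes over even- and odd-indexed pairs and
# stops once a full double pass makes no swap. Worst case is O(n^2)
# comparisons, like bubble sort, but each half-pass touches disjoint pairs and
# so parallelises naturally.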
if __name__ == "__main__":
print('''Enter list to be sorted''')
__SCREAMING_SNAKE_CASE :Optional[Any] = [int(x) for x in input().split()]
# inputing elements of the list in one line
__SCREAMING_SNAKE_CASE :List[str] = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list)
| 156 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = tempfile.mkdtemp()
# fmt: off
lowerCamelCase_ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCamelCase_ = dict(zip(lowercase , range(len(lowercase ) ) ) )
lowerCamelCase_ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCamelCase_ = {"unk_token": "<unk>"}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase ) )
lowerCamelCase_ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
lowerCamelCase_ = os.path.join(self.tmpdirname , lowercase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowercase , lowercase )
def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> Optional[int]:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> List[str]:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase )
def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> Union[str, Any]:
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase )
lowerCamelCase_ = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase )
self.assertIsInstance(processor_fast.tokenizer , lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase )
self.assertIsInstance(processor_fast.image_processor , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCamelCase_ = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
lowerCamelCase_ = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(lowercase , return_tensors="np" )
lowerCamelCase_ = processor(images=lowercase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = processor(text=lowercase )
lowerCamelCase_ = tokenizer(lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(lowercase )
lowerCamelCase_ = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
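# Taken together, the cases above pin down the CLIPProcessor contract:
# text-only calls must match the tokenizer output, image-only calls the image
# processor output, a combined call exposes input_ids, attention_mask and
# pixel_values, and batch_decode simply forwards to the tokenizer.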
| 19 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__A =pytest.mark.integration
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(lowercase ) for x in np.arange(30 ).tolist()]} )
return dset
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
lowerCamelCase_ = dset.map(
lambda lowercase , lowercase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowercase , keep_in_memory=lowercase )
lowerCamelCase_ = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(lowercase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
from elasticsearch import Elasticsearch
lowerCamelCase_ = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
lowerCamelCase_ = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=lowercase )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertRaises(lowercase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase_ = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
self.assertRaises(lowercase , index.search_batch , queries[0] )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
import faiss
lowerCamelCase_ = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase_ = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowercase ):
lowerCamelCase_ = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = faiss.IndexFlat(5 )
lowerCamelCase_ = FaissIndex(custom_index=lowercase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase_ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase_ ( lowerCamelCase__ ):
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase_ = "index.faiss"
lowerCamelCase_ = F'mock://{index_name}'
index.save(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = FaissIndex.load(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowerCamelCase__ )
assert scores[0] > 0
assert indices[0] == 1
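# Note: the parameter hidden behind the placeholder name above is the `mockfs`
# pytest fixture, an fsspec filesystem registered under the "mock://" protocol,
# so this test exercises FaissIndex.save/load through fsspec `storage_options`
# rather than the local disk.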
@require_elasticsearch
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = Elasticsearch()
lowerCamelCase_ = {"acknowledged": True}
lowerCamelCase_ = ElasticSearchIndex(es_client=lowercase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
# batched queries with timeout
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase , request_timeout=30 )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
| 19 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase (lowercase_: Optional[Any] ) -> List[List[ImageInput]]:
if isinstance(lowercase_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowercase_ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowercase_ ):
return [[videos]]
raise ValueError(f"""Could not make batched video from {videos}""" )
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Dict = ['''pixel_values''']
def __init__( self , A__ = True , A__ = None , A__ = PILImageResampling.BILINEAR , A__ = True , A__ = None , A__ = True , A__ = 1 / 255 , A__ = True , A__ = None , A__ = None , **A__ , ):
super().__init__(**A__ )
A__ : Optional[int] = size if size is not None else {"""shortest_edge""": 224}
A__ : List[Any] = get_size_dict(A__ , default_to_square=A__ )
A__ : Dict = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A__ : Any = get_size_dict(A__ , param_name="""crop_size""" )
A__ : Tuple = do_resize
A__ : int = size
A__ : str = do_center_crop
A__ : Any = crop_size
A__ : int = resample
A__ : Dict = do_rescale
A__ : Any = rescale_factor
A__ : Optional[int] = do_normalize
A__ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A__ : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self , A__ , A__ , A__ = PILImageResampling.BILINEAR , A__ = None , **A__ , ):
A__ : Optional[int] = get_size_dict(A__ , default_to_square=A__ )
if "shortest_edge" in size:
A__ : List[Any] = get_resize_output_image_size(A__ , size["""shortest_edge"""] , default_to_square=A__ )
elif "height" in size and "width" in size:
A__ : List[Any] = (size["""height"""], size["""width"""])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__ )
def __A ( self , A__ , A__ , A__ = None , **A__ , ):
A__ : Union[str, Any] = get_size_dict(A__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(A__ , size=(size["""height"""], size["""width"""]) , data_format=A__ , **A__ )
def __A ( self , A__ , A__ , A__ = None , **A__ , ):
return rescale(A__ , scale=A__ , data_format=A__ , **A__ )
def __A ( self , A__ , A__ , A__ , A__ = None , **A__ , ):
return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__ )
def __A ( self , A__ , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = ChannelDimension.FIRST , ):
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A__ : int = to_numpy_array(A__ )
if do_resize:
A__ : str = self.resize(image=A__ , size=A__ , resample=A__ )
if do_center_crop:
A__ : List[Any] = self.center_crop(A__ , size=A__ )
if do_rescale:
A__ : Tuple = self.rescale(image=A__ , scale=A__ )
if do_normalize:
A__ : Tuple = self.normalize(image=A__ , mean=A__ , std=A__ )
A__ : int = to_channel_dimension_format(A__ , A__ )
return image
def __A ( self , A__ , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = ChannelDimension.FIRST , **A__ , ):
A__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A__ : Tuple = resample if resample is not None else self.resample
A__ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ : Any = do_rescale if do_rescale is not None else self.do_rescale
A__ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
A__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
A__ : str = image_std if image_std is not None else self.image_std
A__ : Any = size if size is not None else self.size
A__ : List[Any] = get_size_dict(A__ , default_to_square=A__ )
A__ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
A__ : List[str] = get_size_dict(A__ , param_name="""crop_size""" )
if not valid_images(A__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
A__ : Any = make_batched(A__ )
A__ : int = [
[
self._preprocess_image(
image=A__ , do_resize=A__ , size=A__ , resample=A__ , do_center_crop=A__ , crop_size=A__ , do_rescale=A__ , rescale_factor=A__ , do_normalize=A__ , image_mean=A__ , image_std=A__ , data_format=A__ , )
for img in video
]
for video in videos
]
A__ : List[str] = {"""pixel_values""": videos}
return BatchFeature(data=A__ , tensor_type=A__ )
| 141 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
A_ : Optional[int] = '<<<<<<< This should probably be modified because it mentions: '
A_ : int = '=======\n>>>>>>>\n'
A_ : Optional[Any] = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
A_ : List[Any] = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def UpperCamelCase (lowercase_: Namespace ) -> str:
return ConvertCommand(args.tfds_path , args.datasets_directory )
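# Note: this module-level factory is what `set_defaults(func=...)` below wires
# into argparse, so the datasets CLI entry point can build the command with
# `args.func(args)` and then invoke its run method (obfuscated here as the
# parameterless `__A`).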
class _a (__magic_name__ ):
'''simple docstring'''
@staticmethod
def __A ( A__ ):
A__ : Any = parser.add_parser(
"""convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
train_parser.add_argument(
"""--tfds_path""" , type=A__ , required=A__ , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
train_parser.add_argument(
"""--datasets_directory""" , type=A__ , required=A__ , help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=A__ )
def __init__( self , A__ , A__ , *A__ ):
A__ : Optional[Any] = get_logger("""datasets-cli/converting""" )
A__ : Optional[Any] = tfds_path
A__ : Dict = datasets_directory
def __A ( self ):
if os.path.isdir(self._tfds_path ):
A__ : Any = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
A__ : int = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
A__ : Optional[int] = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
A__ : Optional[Any] = []
A__ : List[Any] = []
A__ : List[Any] = {}
if os.path.isdir(self._tfds_path ):
A__ : List[str] = os.listdir(A__ )
else:
A__ : Optional[Any] = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
A__ : Any = os.path.join(A__ , A__ )
A__ : Optional[Any] = os.path.join(A__ , A__ )
if not os.path.isfile(A__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(A__ , encoding="""utf-8""" ) as f:
A__ : Dict = f.readlines()
A__ : Tuple = []
A__ : str = False
A__ : Tuple = False
A__ : Optional[Any] = []
for line in lines:
A__ : Optional[int] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
A__ : Dict = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
A__ : Any = """"""
continue
elif "from absl import logging" in out_line:
A__ : int = """from datasets import logging\n"""
elif "getLogger" in out_line:
A__ : List[str] = out_line.replace("""getLogger""" , """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
A__ : Optional[Any] = True
A__ : int = list(filter(lambda A__ : e in out_line , A__ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(A__ ) + """\n""" )
out_lines.append(A__ )
out_lines.append(A__ )
continue
else:
for pattern, replacement in TO_CONVERT:
A__ : List[str] = re.sub(A__ , A__ , A__ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
A__ : Dict = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , A__ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
A__ : Tuple = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
A__ : List[str] = True
out_lines.append(A__ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
A__ : Tuple = f_name.replace(""".py""" , """""" )
A__ : Optional[int] = os.path.join(A__ , A__ )
A__ : Optional[Any] = os.path.join(A__ , A__ )
os.makedirs(A__ , exist_ok=A__ )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(A__ )
if needs_manual_update:
with_manual_update.append(A__ )
with open(A__ , """w""" , encoding="""utf-8""" ) as f:
f.writelines(A__ )
self._logger.info(F"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
A__ : int = os.path.basename(A__ )
A__ : List[str] = imports_to_builder_map[f_name.replace(""".py""" , """""" )]
self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(A__ , A__ )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 141 | 1 |
"""simple docstring"""
import math
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
lowerCAmelCase__ : Any = len(__UpperCAmelCase )
lowerCAmelCase__ : int = int(math.floor(math.sqrt(__UpperCAmelCase ) ) )
lowerCAmelCase__ : Optional[int] = 0
while arr[min(__UpperCAmelCase , __UpperCAmelCase ) - 1] < x:
lowerCAmelCase__ : Any = step
step += int(math.floor(math.sqrt(__UpperCAmelCase ) ) )
if prev >= n:
return -1
while arr[prev] < x:
lowerCAmelCase__ : List[Any] = prev + 1
if prev == min(__UpperCAmelCase , __UpperCAmelCase ):
return -1
if arr[prev] == x:
return prev
return -1
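# Complexity note for the search above: the first loop probes every
# floor(sqrt(n))-th element of the (assumed ascending) array and the second
# scans linearly inside a single block, so the total cost is O(sqrt(n))
# comparisons; -1 signals that x is absent.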
if __name__ == "__main__":
_A = input("""Enter numbers separated by a comma:\n""").strip()
_A = [int(item) for item in user_input.split(""",""")]
_A = int(input("""Enter the number to be searched:\n"""))
_A = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(f"""Number {x} is at index {res}""")
| 242 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_A = 1_6
_A = 3_2
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase = 16 ) -> List[str]:
lowerCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase__ : List[str] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase__ : str = datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase__ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase__ : Union[str, Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase__ : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase__ : Any = 8
else:
lowerCAmelCase__ : Any = None
return tokenizer.pad(
__UpperCAmelCase , padding="""longest""" , max_length=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCAmelCase__ : Any = DataLoader(
tokenized_datasets["""train"""] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase )
return train_dataloader, eval_dataloader
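# Note: behind the placeholder parameter names this helper is called below as
# `get_dataloaders(accelerator, batch_size)`; the accelerator is needed so the
# tokenisation map runs on the main process first and the other processes can
# reuse the cached result.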
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_A = mocked_dataloaders # noqa: F811
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __UpperCAmelCase ) == "1":
lowerCAmelCase__ : List[Any] = 2
# New Code #
lowerCAmelCase__ : Tuple = int(args.gradient_accumulation_steps )
# Initialize accelerator
lowerCAmelCase__ : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__UpperCAmelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ : Tuple = config["""lr"""]
lowerCAmelCase__ : int = int(config["""num_epochs"""] )
lowerCAmelCase__ : List[Any] = int(config["""seed"""] )
lowerCAmelCase__ : Tuple = int(config["""batch_size"""] )
lowerCAmelCase__ : Optional[int] = evaluate.load("""glue""" , """mrpc""" )
set_seed(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = get_dataloaders(__UpperCAmelCase , __UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ : Tuple = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase__ : Optional[Any] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase__ : Optional[int] = AdamW(params=model.parameters() , lr=__UpperCAmelCase )
# Instantiate scheduler
lowerCAmelCase__ : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=__UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(__UpperCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Now we train the model
for epoch in range(__UpperCAmelCase ):
model.train()
for step, batch in enumerate(__UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__UpperCAmelCase ):
lowerCAmelCase__ : str = model(**__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = output.loss
accelerator.backward(__UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase__ : int = model(**__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__UpperCAmelCase , references=__UpperCAmelCase , )
lowerCAmelCase__ : Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __UpperCAmelCase )
def lowercase_ ( ) -> Any:
lowerCAmelCase__ : Union[str, Any] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__UpperCAmelCase , default=__UpperCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__UpperCAmelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCAmelCase__ : List[str] = parser.parse_args()
lowerCAmelCase__ : Any = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
main()
| 242 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _SCREAMING_SNAKE_CASE :
def __init__( self : Any , a__ : Optional[Any] , a__ : Dict=13 , a__ : Union[str, Any]=32 , a__ : Dict=3 , a__ : Any=4 , a__ : Any=[10, 20, 30, 40] , a__ : Optional[int]=[2, 2, 3, 2] , a__ : Tuple=True , a__ : str=True , a__ : str=37 , a__ : Optional[int]="gelu" , a__ : Dict=10 , a__ : Union[str, Any]=0.02 , a__ : List[Any]=["stage2", "stage3", "stage4"] , a__ : Union[str, Any]=[2, 3, 4] , a__ : List[Any]=None , ):
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = image_size
__magic_name__ = num_channels
__magic_name__ = num_stages
__magic_name__ = hidden_sizes
__magic_name__ = depths
__magic_name__ = is_training
__magic_name__ = use_labels
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = num_labels
__magic_name__ = initializer_range
__magic_name__ = out_features
__magic_name__ = out_indices
__magic_name__ = scope
def snake_case__ ( self : str ):
__magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.num_labels )
__magic_name__ = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : Tuple ):
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def snake_case__ ( self : Dict , a__ : Optional[int] , a__ : Any , a__ : Tuple ):
__magic_name__ = ConvNextModel(config=a__ )
model.to(a__ )
model.eval()
__magic_name__ = model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case__ ( self : int , a__ : Union[str, Any] , a__ : List[Any] , a__ : Dict ):
__magic_name__ = ConvNextForImageClassification(a__ )
model.to(a__ )
model.eval()
__magic_name__ = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Optional[Any] , a__ : Dict , a__ : List[str] , a__ : Union[str, Any] ):
__magic_name__ = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__magic_name__ = model(a__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__magic_name__ = None
__magic_name__ = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__magic_name__ = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case__ ( self : int ):
__magic_name__ = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs
__magic_name__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # HWC -> NCHW
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
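
# Usage sketch for ResizeShortestEdge (illustrative sizes; assumes HWC uint8
# numpy input, as produced by img_tensorize): the short edge is resized to
# 800 while the long edge is capped at max_size.
def _demo_resize_shortest_edge():
    aug = ResizeShortestEdge([800, 800], max_size=1333)
    resized = aug([np.zeros((480, 640, 3), dtype=np.uint8)])
    return resized[0].shape  # (800, 1067, 3)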
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
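
# Worked example for the two helpers above (values are illustrative): scaling
# halves every coordinate, clipping then clamps in place to a 240x320 canvas.
def _demo_box_helpers():
    boxes = torch.tensor([[10.0, 20.0, 700.0, 500.0]])
    boxes = _scale_box(boxes, torch.tensor([[0.5, 0.5]]))  # -> [[5., 10., 350., 250.]]
    _clip_box(boxes, (240, 320))  # x clamped to [0, 320], y to [0, 240]
    return boxes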
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : List[str] = self.get_feature_extractor()
UpperCAmelCase_ : Optional[Any] = self.get_decoder()
UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Any = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _UpperCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCAmelCase_ : Dict = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(_UpperCamelCase , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=_UpperCamelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Union[str, Any] = self.get_feature_extractor()
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : Optional[Any] = self.get_decoder()
UpperCAmelCase_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
UpperCAmelCase_ : Tuple = floats_list((3, 1_0_0_0) )
UpperCAmelCase_ : List[str] = feature_extractor(_UpperCamelCase , return_tensors='np' )
UpperCAmelCase_ : Any = processor(_UpperCamelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Optional[int] = self.get_feature_extractor()
UpperCAmelCase_ : Optional[int] = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = self.get_decoder()
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = 'This is a test string'
UpperCAmelCase_ : str = processor(text=_UpperCamelCase )
UpperCAmelCase_ : List[str] = tokenizer(_UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : int = self.get_feature_extractor()
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = self.get_decoder()
UpperCAmelCase_ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
UpperCAmelCase_ : int = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
UpperCAmelCase_ : Union[str, Any] = processor.decode(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = decoder.decode_beams(_UpperCamelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Optional[int] = self.get_feature_extractor()
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase_ : Any = self.get_decoder()
UpperCAmelCase_ : int = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
UpperCAmelCase_ : Any = self._get_dummy_logits()
UpperCAmelCase_ : List[str] = 1_5
UpperCAmelCase_ : str = -20.0
UpperCAmelCase_ : List[Any] = -4.0
UpperCAmelCase_ : Dict = processor.batch_decode(
_UpperCamelCase , beam_width=_UpperCamelCase , beam_prune_logp=_UpperCamelCase , token_min_logp=_UpperCamelCase , )
UpperCAmelCase_ : List[str] = decoded_processor_out.text
UpperCAmelCase_ : Dict = list(_UpperCamelCase )
with get_context('fork' ).Pool() as pool:
UpperCAmelCase_ : Optional[int] = decoder.decode_beams_batch(
_UpperCamelCase , _UpperCamelCase , beam_width=_UpperCamelCase , beam_prune_logp=_UpperCamelCase , token_min_logp=_UpperCamelCase , )
UpperCAmelCase_ : Dict = [d[0][0] for d in decoded_decoder_out]
UpperCAmelCase_ : Union[str, Any] = [d[0][2] for d in decoded_decoder_out]
UpperCAmelCase_ : str = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , _UpperCamelCase )
self.assertTrue(np.array_equal(_UpperCamelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.0_54, -18.4_47] , _UpperCamelCase , atol=1E-3 ) )
self.assertTrue(np.array_equal(_UpperCamelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.5_54, -13.94_74] , _UpperCamelCase , atol=1E-3 ) )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : str = self.get_feature_extractor()
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : List[str] = self.get_decoder()
UpperCAmelCase_ : Tuple = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self._get_dummy_logits()
UpperCAmelCase_ : List[Any] = 2.0
UpperCAmelCase_ : List[str] = 5.0
UpperCAmelCase_ : Optional[Any] = -20.0
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : Any = processor.batch_decode(
_UpperCamelCase , alpha=_UpperCamelCase , beta=_UpperCamelCase , unk_score_offset=_UpperCamelCase , lm_score_boundary=_UpperCamelCase , )
UpperCAmelCase_ : Dict = decoded_processor_out.text
UpperCAmelCase_ : Any = list(_UpperCamelCase )
decoder.reset_params(
alpha=_UpperCamelCase , beta=_UpperCamelCase , unk_score_offset=_UpperCamelCase , lm_score_boundary=_UpperCamelCase , )
with get_context('fork' ).Pool() as pool:
UpperCAmelCase_ : Tuple = decoder.decode_beams_batch(
_UpperCamelCase , _UpperCamelCase , )
UpperCAmelCase_ : List[Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , _UpperCamelCase )
UpperCAmelCase_ : int = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Any = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
UpperCAmelCase_ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase_ : Union[str, Any] = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
UpperCAmelCase_ : List[Any] = os.listdir(_UpperCamelCase )
UpperCAmelCase_ : List[str] = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : List[Any] = snapshot_download('hf-internal-testing/processor_with_lm' )
UpperCAmelCase_ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(_UpperCamelCase )
UpperCAmelCase_ : Any = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase_ : Dict = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
UpperCAmelCase_ : int = os.listdir(_UpperCamelCase )
UpperCAmelCase_ : Dict = os.listdir(_UpperCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Any = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
UpperCAmelCase_ : Any = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
UpperCAmelCase_ : Union[str, Any] = floats_list((3, 1_0_0_0) )
UpperCAmelCase_ : Tuple = processor_wavaveca(_UpperCamelCase , return_tensors='np' )
UpperCAmelCase_ : List[str] = processor_auto(_UpperCamelCase , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
UpperCAmelCase_ : Optional[Any] = self._get_dummy_logits()
UpperCAmelCase_ : List[Any] = processor_wavaveca.batch_decode(_UpperCamelCase )
UpperCAmelCase_ : List[str] = processor_auto.batch_decode(_UpperCamelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : str = self.get_feature_extractor()
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : List[str] = self.get_decoder()
UpperCAmelCase_ : Dict = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
UpperCAmelCase_ : Tuple = self._get_dummy_logits()[0]
UpperCAmelCase_ : Union[str, Any] = processor.decode(_UpperCamelCase , output_word_offsets=_UpperCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
UpperCAmelCase_ : List[Any] = self._get_dummy_logits()
UpperCAmelCase_ : Any = processor.batch_decode(_UpperCamelCase , output_word_offsets=_UpperCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(_UpperCamelCase , _UpperCamelCase ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(_UpperCamelCase , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __UpperCAmelCase ( self ) -> List[str]:
import torch
UpperCAmelCase_ : Optional[Any] = load_dataset('common_voice' , 'en' , split='train' , streaming=_UpperCamelCase )
UpperCAmelCase_ : Dict = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
UpperCAmelCase_ : Any = iter(_UpperCamelCase )
UpperCAmelCase_ : int = next(_UpperCamelCase )
UpperCAmelCase_ : List[str] = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
UpperCAmelCase_ : List[Any] = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCAmelCase_ : Dict = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(_UpperCamelCase ).logits.cpu().numpy()
UpperCAmelCase_ : List[Any] = processor.decode(logits[0] , output_word_offsets=_UpperCamelCase )
UpperCAmelCase_ : List[str] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
UpperCAmelCase_ : Tuple = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
UpperCAmelCase_ : List[str] = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(_UpperCamelCase , 'word' ) ) , _UpperCamelCase )
self.assertEqual(' '.join(self.get_from_offsets(_UpperCamelCase , 'word' ) ) , output.text )
# output times
UpperCAmelCase_ : Tuple = torch.tensor(self.get_from_offsets(_UpperCamelCase , 'start_time' ) )
UpperCAmelCase_ : List[Any] = torch.tensor(self.get_from_offsets(_UpperCamelCase , 'end_time' ) )
# fmt: off
UpperCAmelCase_ : Tuple = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] )
UpperCAmelCase_ : Optional[int] = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=0.01 ) )
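
# End-to-end sketch of the processor under test (illustrative; needs network
# access plus pyctcdecode/kenlm, and reuses the tiny hub checkpoint above).
def _demo_processor_with_lm():
    processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    logits = np.random.rand(10, 16)  # (time, vocab) dummy CTC logits
    return processor.decode(logits).text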
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
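
# Typical downstream usage once the guarded imports above succeed (a sketch;
# the model ids are illustrative, not mandated by this module):
#
#   from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#
#   controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pipe = StableDiffusionControlNetPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", controlnet=controlnet
#   )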
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
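
# Shape-check sketch for the blocks above (illustrative sizes); these Flax
# modules use NHWC layout, so a 32->64 channel block maps (1, 8, 8, 32) to
# (1, 8, 8, 64).
def _demo_resnet_block():
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    x = jnp.zeros((1, 8, 8, 32))
    temb = jnp.zeros((1, 128))
    params = block.init(jax.random.PRNGKey(0), x, temb)
    return block.apply(params, x, temb).shape  # (1, 8, 8, 64)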
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro–Winkler similarity of two strings.

    >>> jaro_winkler("hello", "hello")
    1.0
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
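
# Quick sanity checks: identical strings score exactly 1.0 (the Winkler
# prefix bonus vanishes when jaro == 1), e.g.
#
#   assert jaro_winkler("hello", "hello") == 1.0
#   assert 0.0 <= jaro_winkler("hello", "world") <= 1.0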
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
A : List[Any] = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
A : str = BASE_URL + "/user"
# https://github.com/settings/tokens
A : List[str] = os.environ.get("USER_TOKEN", "")
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = {
"Authorization": f"token {auth_token}",
"Accept": "application/vnd.github.v3+json",
}
return requests.get(snake_case__ , headers=snake_case__ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError("\'USER_TOKEN\' field cannot be empty.")
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
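
# Usage sketch for the extractor above (illustrative; the defaults assume
# 44.1 kHz mono input).
def _demo_tvlt_feature_extraction():
    fe = TvltFeatureExtractor()
    audio = np.random.randn(44100).astype(np.float32)  # 1 s of noise
    batch = fe(audio, sampling_rate=44100, return_tensors="np")
    return batch["audio_values"].shape  # (1, 1, padded_time, 128)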
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, nll_loss = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
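
# Sketch of the scheduler lookup used by `_get_lr_scheduler` above (optimizer
# settings and step counts are illustrative).
def _demo_scheduler_lookup(model: nn.Module):
    optimizer = AdamW(model.parameters(), lr=3e-5)
    return arg_to_scheduler["linear"](optimizer, num_warmup_steps=0, num_training_steps=1000)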
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
snake_case : List[Any] = self._tf_checkpoint
snake_case : Tuple = """"""
else:
snake_case : Tuple = self._tf_checkpoint
snake_case : Tuple = """"""
convert_transfo_xl_checkpoint_to_pytorch(
A , self._config , self._pytorch_dump_output , A )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
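
# Worked example for the rect helpers above: a 128x128 tile at the canvas
# origin, grown by a 32 px border and clamped back to a 512x512 image.
def _demo_overlap_rect():
    rect = add_overlap_rect((0, 0, 128, 128), 32, (512, 512))
    assert rect == (0, 0, 160, 160)
    return rect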
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
@torch.no_grad()
def __call__( self : Optional[Any] , __lowercase : Union[str, List[str]] , __lowercase : Union[PIL.Image.Image, List[PIL.Image.Image]] , __lowercase : int = 75 , __lowercase : float = 9.0 , __lowercase : int = 50 , __lowercase : Optional[Union[str, List[str]]] = None , __lowercase : Optional[int] = 1 , __lowercase : float = 0.0 , __lowercase : Optional[torch.Generator] = None , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowercase : int = 1 , __lowercase : int = 128 , __lowercase : int = 32 , __lowercase : int = 32 , ):
"""simple docstring"""
__lowercase =Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) )
__lowercase =math.ceil(image.size[0] / tile_size )
__lowercase =math.ceil(image.size[1] / tile_size )
__lowercase =tcx * tcy
__lowercase =0
for y in range(__lowercase ):
for x in range(__lowercase ):
self._process_tile(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , prompt=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , noise_level=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , )
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def __UpperCamelCase ( ):
'''simple docstring'''
__lowercase ='stabilityai/stable-diffusion-x4-upscaler'
    __lowercase =StableDiffusionTiledUpscalePipeline.from_pretrained(lowercase__, revision='fp16', torch_dtype=torch.float16 )
__lowercase =pipe.to('cuda' )
__lowercase =Image.open('../../docs/source/imgs/diffusers_library.jpg' )
def callback(lowercase__ : Dict ):
print(F'''progress: {obj["progress"]:.4f}''' )
obj["image"].save('diffusers_library_progress.jpg' )
__lowercase =pipe(image=lowercase__, prompt='Black font, white background, vector', noise_level=40, callback=lowercase__ )
final_image.save('diffusers_library.jpg' )
if __name__ == "__main__":
main()
| 141 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class lowerCAmelCase :
def __init__( self : List[str] , __lowercase : Collection[float] | None = None ):
"""simple docstring"""
if components is None:
__lowercase =[]
__lowercase =list(__lowercase )
def __len__( self : Union[str, Any] ):
"""simple docstring"""
return len(self.__components )
def __str__( self : int ):
"""simple docstring"""
return "(" + ",".join(map(__lowercase , self.__components ) ) + ")"
def __add__( self : List[Any] , __lowercase : Vector ):
"""simple docstring"""
__lowercase =len(self )
if size == len(__lowercase ):
__lowercase =[self.__components[i] + other.component(__lowercase ) for i in range(__lowercase )]
return Vector(__lowercase )
else:
raise Exception('must have the same size' )
def __sub__( self : str , __lowercase : Vector ):
"""simple docstring"""
__lowercase =len(self )
if size == len(__lowercase ):
__lowercase =[self.__components[i] - other.component(__lowercase ) for i in range(__lowercase )]
return Vector(__lowercase )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self : Tuple , __lowercase : float ):
"""simple docstring"""
...
@overload
def __mul__( self : Tuple , __lowercase : Vector ):
"""simple docstring"""
...
def __mul__( self : int , __lowercase : float | Vector ):
"""simple docstring"""
if isinstance(__lowercase , (float, int) ):
__lowercase =[c * other for c in self.__components]
return Vector(__lowercase )
elif isinstance(__lowercase , __lowercase ) and len(self ) == len(__lowercase ):
__lowercase =len(self )
__lowercase =[self.__components[i] * other.component(__lowercase ) for i in range(__lowercase )]
return sum(__lowercase )
else: # error case
raise Exception('invalid operand!' )
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
return Vector(self.__components )
def snake_case ( self : str , __lowercase : int ):
"""simple docstring"""
if isinstance(__lowercase , __lowercase ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def snake_case ( self : List[str] , __lowercase : int , __lowercase : float ):
"""simple docstring"""
assert -len(self.__components ) <= pos < len(self.__components )
__lowercase =value
def snake_case ( self : Tuple ):
"""simple docstring"""
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__lowercase =[c**2 for c in self.__components]
return math.sqrt(sum(__lowercase ) )
def snake_case ( self : List[Any] , __lowercase : Vector , __lowercase : bool = False ):
"""simple docstring"""
__lowercase =self * other
__lowercase =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def __UpperCamelCase ( lowercase__ : int ):
'''simple docstring'''
assert isinstance(lowercase__, lowercase__ )
return Vector([0] * dimension )
def __UpperCamelCase ( lowercase__ : int, lowercase__ : int ):
'''simple docstring'''
assert isinstance(lowercase__, lowercase__ ) and (isinstance(lowercase__, lowercase__ ))
__lowercase =[0] * dimension
__lowercase =1
return Vector(lowercase__ )
def __UpperCamelCase ( lowercase__ : float, lowercase__ : Vector, lowercase__ : Vector ):
'''simple docstring'''
assert (
isinstance(lowercase__, lowercase__ )
and isinstance(lowercase__, lowercase__ )
and (isinstance(lowercase__, (int, float) ))
)
return x * scalar + y
def __UpperCamelCase ( lowercase__ : int, lowercase__ : int, lowercase__ : int ):
'''simple docstring'''
random.seed(lowercase__ )
__lowercase =[random.randint(lowercase__, lowercase__ ) for _ in range(lowercase__ )]
return Vector(lowercase__ )
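# Hedged sketch (illustrative, not from the original snippet): the angle
# formula the Vector class above implements, restated over plain lists so it
# runs independently of the mangled class: angle = acos((x . y) / (|x| * |y|)).
import math

def angle_between(x: list[float], y: list[float], deg: bool = False) -> float:
    num = sum(a * b for a, b in zip(x, y))  # dot product
    den = math.sqrt(sum(a * a for a in x)) * math.sqrt(sum(b * b for b in y))
    rad = math.acos(num / den)
    return math.degrees(rad) if deg else rad

# angle_between([1, 0], [0, 1], deg=True) == 90.0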
class lowerCAmelCase :
def __init__( self : Dict , __lowercase : list[list[float]] , __lowercase : int , __lowercase : int ):
"""simple docstring"""
__lowercase =matrix
__lowercase =w
__lowercase =h
def __str__( self : Optional[Any] ):
"""simple docstring"""
__lowercase =''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Union[str, Any] , __lowercase : Matrix ):
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
__lowercase =[]
for i in range(self.__height ):
__lowercase =[
self.__matrix[i][j] + other.component(__lowercase , __lowercase )
for j in range(self.__width )
]
matrix.append(__lowercase )
return Matrix(__lowercase , self.__width , self.__height )
else:
        raise Exception('matrices must have the same dimension!' )
def __sub__( self : int , __lowercase : Matrix ):
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
__lowercase =[]
for i in range(self.__height ):
__lowercase =[
self.__matrix[i][j] - other.component(__lowercase , __lowercase )
for j in range(self.__width )
]
matrix.append(__lowercase )
return Matrix(__lowercase , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self : int , __lowercase : float ):
"""simple docstring"""
...
@overload
def __mul__( self : str , __lowercase : Vector ):
"""simple docstring"""
...
def __mul__( self : Tuple , __lowercase : float | Vector ):
"""simple docstring"""
if isinstance(__lowercase , __lowercase ): # matrix-vector
if len(__lowercase ) == self.__width:
__lowercase =zero_vector(self.__height )
for i in range(self.__height ):
__lowercase =[
self.__matrix[i][j] * other.component(__lowercase )
for j in range(self.__width )
]
ans.change_component(__lowercase , sum(__lowercase ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(__lowercase , (int, float) ): # matrix-scalar
__lowercase =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(__lowercase , self.__width , self.__height )
return None
def snake_case ( self : int ):
"""simple docstring"""
return self.__height
def snake_case ( self : List[str] ):
"""simple docstring"""
return self.__width
def snake_case ( self : Dict , __lowercase : int , __lowercase : int ):
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
        raise Exception('component: indices out of bounds' )
def snake_case ( self : Dict , __lowercase : int , __lowercase : int , __lowercase : float ):
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
__lowercase =value
else:
raise Exception('change_component: indices out of bounds' )
def snake_case ( self : Dict , __lowercase : int , __lowercase : int ):
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__lowercase =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(__lowercase ) ):
__lowercase =minor[i][:y] + minor[i][y + 1 :]
return Matrix(__lowercase , self.__width - 1 , self.__height - 1 ).determinant()
def snake_case ( self : Union[str, Any] , __lowercase : int , __lowercase : int ):
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(__lowercase , __lowercase )
else:
raise Exception('Indices out of bounds' )
def snake_case ( self : Tuple ):
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__lowercase =[
self.__matrix[0][y] * self.cofactor(0 , __lowercase ) for y in range(self.__width )
]
return sum(__lowercase )
def __UpperCamelCase ( lowercase__ : int ):
'''simple docstring'''
__lowercase =[[0] * n for _ in range(lowercase__ )]
return Matrix(lowercase__, lowercase__, lowercase__ )
def __UpperCamelCase ( lowercase__ : int, lowercase__ : int, lowercase__ : int, lowercase__ : int ):
'''simple docstring'''
random.seed(lowercase__ )
__lowercase =[
[random.randint(lowercase__, lowercase__ ) for _ in range(lowercase__ )] for _ in range(lowercase__ )
]
return Matrix(lowercase__, lowercase__, lowercase__ )
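# Hedged sketch (illustrative, not from the original snippet): a compact,
# self-contained restatement of the Laplace (cofactor) expansion that the
# Matrix determinant method above performs, over plain nested lists.
def det(m: list[list[float]]) -> float:
    if len(m) == 1:
        return m[0][0]
    return sum(
        (-1) ** col * m[0][col] * det([row[:col] + row[col + 1 :] for row in m[1:]])
        for col in range(len(m))
    )

# det([[1, 2], [3, 4]]) == -2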
| 141 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class _snake_case ( a__ ):
snake_case__ = "biogpt"
def __init__( self : Tuple , UpperCAmelCase : Dict=42384 , UpperCAmelCase : Union[str, Any]=1024 , UpperCAmelCase : Any=24 , UpperCAmelCase : Union[str, Any]=16 , UpperCAmelCase : Tuple=4096 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Optional[int]=1024 , UpperCAmelCase : int=0.0_2 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : str=True , UpperCAmelCase : str=True , UpperCAmelCase : Union[str, Any]=0.0 , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : Dict=1 , UpperCAmelCase : Dict=0 , UpperCAmelCase : Optional[int]=2 , **UpperCAmelCase : str , ):
__lowerCamelCase : Any = vocab_size
__lowerCamelCase : List[Any] = max_position_embeddings
__lowerCamelCase : List[str] = hidden_size
__lowerCamelCase : Optional[Any] = num_hidden_layers
__lowerCamelCase : int = num_attention_heads
__lowerCamelCase : Tuple = intermediate_size
__lowerCamelCase : Optional[int] = hidden_act
__lowerCamelCase : Optional[int] = hidden_dropout_prob
__lowerCamelCase : int = attention_probs_dropout_prob
__lowerCamelCase : Union[str, Any] = initializer_range
__lowerCamelCase : int = layer_norm_eps
__lowerCamelCase : int = scale_embedding
__lowerCamelCase : int = use_cache
__lowerCamelCase : Any = layerdrop
__lowerCamelCase : Any = activation_dropout
        super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
| 64 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: Optional[int]=7 ) -> int:
'''simple docstring'''
__lowerCamelCase : List[str] = None
if token is not None:
__lowerCamelCase : List[Any] = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
__lowerCamelCase : Optional[Any] = "636036"
__lowerCamelCase : Dict = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
__lowerCamelCase : List[str] = requests.get(_lowerCamelCase , headers=_lowerCamelCase ).json()
return result["workflow_runs"]
def lowercase_ ( _lowerCamelCase: Tuple ) -> int:
'''simple docstring'''
__lowerCamelCase : List[Any] = get_daily_ci_runs(_lowerCamelCase )
__lowerCamelCase : Optional[Any] = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
__lowerCamelCase : Optional[int] = workflow_run["id"]
break
return workflow_run_id
def lowercase_ ( _lowerCamelCase: List[Any] , _lowerCamelCase: int , _lowerCamelCase: str ) -> Any:
'''simple docstring'''
__lowerCamelCase : Any = get_last_daily_ci_runs(_lowerCamelCase )
if workflow_run_id is not None:
__lowerCamelCase : Dict = get_artifacts_links(worflow_run_id=_lowerCamelCase , token=_lowerCamelCase )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
__lowerCamelCase : int = artifacts_links[artifact_name]
download_artifact(
artifact_name=_lowerCamelCase , artifact_url=_lowerCamelCase , output_dir=_lowerCamelCase , token=_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Dict , _lowerCamelCase: int ) -> Any:
'''simple docstring'''
get_last_daily_ci_artifacts(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : int = {}
for artifact_name in artifact_names:
__lowerCamelCase : Tuple = os.path.join(_lowerCamelCase , F"""{artifact_name}.zip""" )
if os.path.isfile(_lowerCamelCase ):
__lowerCamelCase : Optional[int] = {}
with zipfile.ZipFile(_lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(_lowerCamelCase ):
# read the file
with z.open(_lowerCamelCase ) as f:
__lowerCamelCase : Tuple = f.read().decode("UTF-8" )
    return results
| 64 | 1 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = tf.convert_to_tensor(lowerCamelCase )
UpperCAmelCase__ = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = tf.convert_to_tensor(lowerCamelCase )
UpperCAmelCase__ = tf.cast(math.pi , x.dtype )
UpperCAmelCase__ = tf.cast(0.044715 , x.dtype )
UpperCAmelCase__ = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(lowerCamelCase , 3 )) ))
return x * cdf
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = tf.convert_to_tensor(lowerCamelCase )
return x * tf.tanh(tf.math.softplus(lowerCamelCase ) )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = tf.convert_to_tensor(lowerCamelCase )
UpperCAmelCase__ = tf.cast(0.044715 , x.dtype )
UpperCAmelCase__ = tf.cast(0.7978845608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = tf.convert_to_tensor(lowerCamelCase )
UpperCAmelCase__ = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def a_ ( lowerCamelCase ):
return tf.clip_by_value(_gelu(lowerCamelCase ) , -1_0 , 1_0 )
def a_ ( lowerCamelCase , lowerCamelCase=-1 ):
UpperCAmelCase__ , UpperCAmelCase__ = tf.split(lowerCamelCase , 2 , axis=lowerCamelCase )
return a * tf.math.sigmoid(lowerCamelCase )
if version.parse(tf.version.VERSION) >= version.parse('2.4'):
def a_ ( lowerCamelCase ):
return tf.keras.activations.gelu(lowerCamelCase , approximate=lowerCamelCase )
lowerCAmelCase__ : str = tf.keras.activations.gelu
lowerCAmelCase__ : Tuple = approximate_gelu_wrap
else:
lowerCAmelCase__ : str = _gelu
lowerCAmelCase__ : Optional[Any] = _gelu_new
lowerCAmelCase__ : Any = {
'gelu': gelu,
'gelu_10': gelu_aa,
'gelu_fast': gelu_fast,
'gelu_new': gelu_new,
'glu': glu,
'mish': mish,
'quick_gelu': quick_gelu,
'relu': tf.keras.activations.relu,
'sigmoid': tf.keras.activations.sigmoid,
'silu': tf.keras.activations.swish,
'swish': tf.keras.activations.swish,
'tanh': tf.keras.activations.tanh,
}
def a_ ( lowerCamelCase ):
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
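# Hedged sketch (illustrative, not from the original snippet): a quick
# standard-library check that the tanh approximation of GELU used above
# tracks the exact erf form closely; constants match the snippet's.
import math

def gelu_exact(x: float) -> float:
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

def gelu_tanh(x: float) -> float:
    return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))

# abs(gelu_exact(1.0) - gelu_tanh(1.0)) < 1e-3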
| 98 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ : str = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Any = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
_lowercase : Tuple = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Any = "tapas"
def __init__( self : Tuple , lowerCAmelCase : Optional[int]=30522 , lowerCAmelCase : Any=768 , lowerCAmelCase : List[str]=12 , lowerCAmelCase : int=12 , lowerCAmelCase : Tuple=3072 , lowerCAmelCase : Optional[Any]="gelu" , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : List[str]=1024 , lowerCAmelCase : Dict=[3, 256, 256, 2, 256, 256, 10] , lowerCAmelCase : List[str]=0.02 , lowerCAmelCase : Optional[int]=1E-12 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : Optional[int]=10.0 , lowerCAmelCase : List[str]=0 , lowerCAmelCase : List[str]=1.0 , lowerCAmelCase : int=None , lowerCAmelCase : Tuple=1.0 , lowerCAmelCase : List[str]=False , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Tuple=1.0 , lowerCAmelCase : Any=1.0 , lowerCAmelCase : Tuple=False , lowerCAmelCase : Tuple=False , lowerCAmelCase : int="ratio" , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Union[str, Any]=32 , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : str=True , lowerCAmelCase : str=False , lowerCAmelCase : Dict=False , lowerCAmelCase : Dict=True , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : List[Any]=None , lowerCAmelCase : str=None , **lowerCAmelCase : Union[str, Any] , )-> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_sizes
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
# Fine-tuning task hyperparameters
UpperCAmelCase = positive_label_weight
UpperCAmelCase = num_aggregation_labels
UpperCAmelCase = aggregation_loss_weight
UpperCAmelCase = use_answer_as_supervision
UpperCAmelCase = answer_loss_importance
UpperCAmelCase = use_normalized_answer_loss
UpperCAmelCase = huber_loss_delta
UpperCAmelCase = temperature
UpperCAmelCase = aggregation_temperature
UpperCAmelCase = use_gumbel_for_cells
UpperCAmelCase = use_gumbel_for_aggregation
UpperCAmelCase = average_approximation_function
UpperCAmelCase = cell_selection_preference
UpperCAmelCase = answer_loss_cutoff
UpperCAmelCase = max_num_rows
UpperCAmelCase = max_num_columns
UpperCAmelCase = average_logits_per_cell
UpperCAmelCase = select_one_column
UpperCAmelCase = allow_empty_column_selection
UpperCAmelCase = init_cell_selection_weights_to_zero
UpperCAmelCase = reset_position_index_per_cell
UpperCAmelCase = disable_per_token_loss
# Aggregation hyperparameters
UpperCAmelCase = aggregation_labels
UpperCAmelCase = no_aggregation_label_index
if isinstance(self.aggregation_labels , lowerCAmelCase ):
UpperCAmelCase = {int(lowerCAmelCase ): v for k, v in aggregation_labels.items()}
| 358 |
'''simple docstring'''
def lowerCamelCase__ ( input_a : int , input_b : int ):
'''simple docstring'''
    return int(input_a == input_b == 0 )
def lowerCamelCase__ ( ):
'''simple docstring'''
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
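# Hedged sketch (illustrative, not from the original snippet): the same NOR
# truth function written with Python's boolean operators, for comparison
# with the arithmetic equality form above.
def nor_gate_bool(input_a: int, input_b: int) -> int:
    return int(not (input_a or input_b))

# Only nor_gate_bool(0, 0) yields 1; every other input pair yields 0.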
| 91 | 0 |
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return [sentence[i : i + ngram_size] for i in range(len(_UpperCAmelCase ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
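# Hedged sketch (illustrative, not from the original snippet): a standalone
# version of the sliding-window character n-gram extraction above, since the
# snippet's parameter names were mangled.
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]

# create_ngram("abcde", 3) -> ['abc', 'bcd', 'cde']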
| 273 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCAmelCase__ :
'''simple docstring'''
UpperCamelCase = None
def snake_case__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCAmelCase : Optional[int] = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , a_ )
def snake_case__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Union[str, Any] = os.path.join(a_ , '''feat_extract.json''' )
feat_extract_first.to_json_file(a_ )
__UpperCAmelCase : Any = self.feature_extraction_class.from_json_file(a_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def snake_case__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : List[str] = feat_extract_first.save_pretrained(a_ )[0]
check_json_file_has_correct_format(a_ )
__UpperCAmelCase : Optional[Any] = self.feature_extraction_class.from_pretrained(a_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def snake_case__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : int = self.feature_extraction_class()
self.assertIsNotNone(a_ )
| 226 | 0 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase__ ( self ) -> int:
A = tempfile.mkdtemp()
A = 8
# DPR tok
A = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
A = os.path.join(self.tmpdirname ,"""dpr_tokenizer""" )
os.makedirs(lowerCamelCase_ ,exist_ok=lowerCamelCase_ )
A = os.path.join(lowerCamelCase_ ,DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
A = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
A = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
A = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
A = {"""unk_token""": """<unk>"""}
A = os.path.join(self.tmpdirname ,"""bart_tokenizer""" )
os.makedirs(lowerCamelCase_ ,exist_ok=lowerCamelCase_ )
A = os.path.join(lowerCamelCase_ ,BART_VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(lowerCamelCase_ ,BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
def UpperCamelCase__ ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) )
def UpperCamelCase__ ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) )
def UpperCamelCase__ ( self ) -> Any:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def UpperCamelCase__ ( self ) -> List[Any]:
A = os.path.join(self.tmpdirname ,"""rag_tokenizer""" )
A = RagConfig(question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() )
A = RagTokenizer(question_encoder=self.get_dpr_tokenizer() ,generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(lowerCamelCase_ )
rag_tokenizer.save_pretrained(lowerCamelCase_ )
A = RagTokenizer.from_pretrained(lowerCamelCase_ ,config=lowerCamelCase_ )
self.assertIsInstance(new_rag_tokenizer.question_encoder ,lowerCamelCase_ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() ,rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator ,lowerCamelCase_ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() ,rag_tokenizer.generator.get_vocab() )
@slow
def UpperCamelCase__ ( self ) -> Union[str, Any]:
A = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
A = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
A = tokenizer(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@slow
def UpperCamelCase__ ( self ) -> Tuple:
A = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
A = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
A = tokenizer(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
| 370 |
"""simple docstring"""
def _A ( _a : int ):
"""simple docstring"""
A = abs(_a )
A = 0
while n > 0:
res += n % 1_0
n //= 1_0
return res
def _A ( _a : int ):
"""simple docstring"""
A = abs(_a )
return n if n < 1_0 else n % 1_0 + sum_of_digits(n // 1_0 )
def _A ( _a : int ):
"""simple docstring"""
return sum(int(_a ) for c in str(abs(_a ) ) )
def _A ( ):
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_a : Callable , _a : int ) -> None:
A = f'{func.__name__}({value})'
A = timeit(f'__main__.{call}' , setup="""import __main__""" )
print(f'{call:56} = {func(_a )} -- {timing:.4f} seconds' )
for value in (2_6_2_1_4_4, 1_1_2_5_8_9_9_9_0_6_8_4_2_6_2_4, 1_2_6_7_6_5_0_6_0_0_2_2_8_2_2_9_4_0_1_4_9_6_7_0_3_2_0_5_3_7_6):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_a , _a )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
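# Hedged sketch (illustrative, not from the original snippet): a worked
# cross-check of two of the digit-sum variants benchmarked above.
def digit_sum_check(n: int = 262144) -> bool:
    via_str = sum(int(c) for c in str(abs(n)))  # the "compact" variant
    via_div, m = 0, abs(n)
    while m > 0:                                # the divmod-loop variant
        via_div += m % 10
        m //= 10
    return via_str == via_div

# digit_sum_check() is True; for 262144 both paths give 2+6+2+1+4+4 == 19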
| 77 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A__ : List[str] =StableUnCLIPImgaImgPipeline
A__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_PARAMS
A__ : Optional[Any] =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A__ : Union[str, Any] =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A__ : int =frozenset([] )
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = 32
SCREAMING_SNAKE_CASE__ = embedder_hidden_size
# image encoding components
SCREAMING_SNAKE_CASE__ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=UpperCAmelCase_ , projection_dim=UpperCAmelCase_ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase_ , layers_per_block=1 , upcast_attention=UpperCAmelCase_ , use_linear_projection=UpperCAmelCase_ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoencoderKL()
SCREAMING_SNAKE_CASE__ = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
def A_ ( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=0 , UpperCAmelCase_ : int=True ):
if str(UpperCAmelCase_ ).startswith('mps' ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
if pil_image:
SCREAMING_SNAKE_CASE__ = input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE__ = input_image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
SCREAMING_SNAKE_CASE__ = DiffusionPipeline.numpy_to_pil(UpperCAmelCase_ )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def A_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = StableUnCLIPImgaImgPipeline(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(UpperCAmelCase_ )
inputs.update({'image_embeds': None} )
SCREAMING_SNAKE_CASE__ = sd_pipe(**UpperCAmelCase_ ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A_ ( self : Dict ):
SCREAMING_SNAKE_CASE__ = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase_ )
def A_ ( self : Dict ):
SCREAMING_SNAKE_CASE__ = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase_ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def A_ ( self : int ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCAmelCase_ )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
def A_ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : str ):
SCREAMING_SNAKE_CASE__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
SCREAMING_SNAKE_CASE__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
SCREAMING_SNAKE_CASE__ = StableUnCLIPImgaImgPipeline.from_pretrained(
        'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.float16 )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(UpperCAmelCase_ , 'anime turle' , generator=UpperCAmelCase_ , output_type='np' )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
SCREAMING_SNAKE_CASE__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
SCREAMING_SNAKE_CASE__ = StableUnCLIPImgaImgPipeline.from_pretrained(
        'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.float16 )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(UpperCAmelCase_ , 'anime turle' , generator=UpperCAmelCase_ , output_type='np' )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE__ = StableUnCLIPImgaImgPipeline.from_pretrained(
        'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.float16 )
SCREAMING_SNAKE_CASE__ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ = pipe(
UpperCAmelCase_ , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE__ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
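# Hedged sketch (illustrative, not from the original snippet): the
# reproducibility pattern these tests rely on -- a CPU-seeded
# torch.Generator passed into the pipeline call (seed value is arbitrary).
import torch

def make_generator(seed: int = 0) -> torch.Generator:
    return torch.Generator(device="cpu").manual_seed(seed)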
| 176 |
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int:
'''simple docstring'''
def update_area_of_max_square(UpperCamelCase_ , UpperCamelCase_ ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
SCREAMING_SNAKE_CASE__ = update_area_of_max_square(UpperCamelCase_ , col + 1 )
SCREAMING_SNAKE_CASE__ = update_area_of_max_square(row + 1 , col + 1 )
SCREAMING_SNAKE_CASE__ = update_area_of_max_square(row + 1 , UpperCamelCase_ )
if mat[row][col]:
SCREAMING_SNAKE_CASE__ = 1 + min([right, diagonal, down] )
SCREAMING_SNAKE_CASE__ = max(largest_square_area[0] , UpperCamelCase_ )
return sub_problem_sol
else:
return 0
SCREAMING_SNAKE_CASE__ = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int:
'''simple docstring'''
def update_area_of_max_square_using_dp_array(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
SCREAMING_SNAKE_CASE__ = update_area_of_max_square_using_dp_array(UpperCamelCase_ , col + 1 , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = update_area_of_max_square_using_dp_array(row + 1 , UpperCamelCase_ , UpperCamelCase_ )
if mat[row][col]:
SCREAMING_SNAKE_CASE__ = 1 + min([right, diagonal, down] )
SCREAMING_SNAKE_CASE__ = max(largest_square_area[0] , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = sub_problem_sol
return sub_problem_sol
else:
return 0
SCREAMING_SNAKE_CASE__ = [0]
SCREAMING_SNAKE_CASE__ = [[-1] * cols for _ in range(UpperCamelCase_ )]
update_area_of_max_square_using_dp_array(0 , 0 , UpperCamelCase_ )
return largest_square_area[0]
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [[0] * (cols + 1) for _ in range(rows + 1 )]
SCREAMING_SNAKE_CASE__ = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
SCREAMING_SNAKE_CASE__ = dp_array[row][col + 1]
SCREAMING_SNAKE_CASE__ = dp_array[row + 1][col + 1]
SCREAMING_SNAKE_CASE__ = dp_array[row + 1][col]
if mat[row][col] == 1:
SCREAMING_SNAKE_CASE__ = 1 + min(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = max(dp_array[row][col] , UpperCamelCase_ )
else:
SCREAMING_SNAKE_CASE__ = 0
return largest_square_area
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [0] * (cols + 1)
SCREAMING_SNAKE_CASE__ = [0] * (cols + 1)
SCREAMING_SNAKE_CASE__ = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
SCREAMING_SNAKE_CASE__ = current_row[col + 1]
SCREAMING_SNAKE_CASE__ = next_row[col + 1]
SCREAMING_SNAKE_CASE__ = next_row[col]
if mat[row][col] == 1:
SCREAMING_SNAKE_CASE__ = 1 + min(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = max(current_row[col] , UpperCamelCase_ )
else:
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
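# Hedged sketch (illustrative, not from the original snippet): the DP
# recurrence shared by all four variants above -- square[r][c] = 1 +
# min(right, diagonal, down) when mat[r][c] == 1. Note the returned value is
# the side length of the largest square (2 for the 2x2 all-ones example).
def largest_square_side(mat: list[list[int]]) -> int:
    rows, cols = len(mat), len(mat[0])
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]
    best = 0
    for r in range(rows - 1, -1, -1):
        for c in range(cols - 1, -1, -1):
            if mat[r][c]:
                dp[r][c] = 1 + min(dp[r][c + 1], dp[r + 1][c + 1], dp[r + 1][c])
                best = max(best, dp[r][c])
    return best

# largest_square_side([[1, 1], [1, 1]]) == 2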
| 176 | 1 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
lowerCAmelCase : List[Any] =[int(0.5 * n * (n + 1)) for n in range(1, 101)]
def UpperCAmelCase_ ( ):
lowercase_ :List[str] = os.path.dirname(os.path.realpath(__lowerCamelCase ) )
lowercase_ :str = os.path.join(__lowerCamelCase ,"words.txt" )
lowercase_ :List[Any] = ""
with open(__lowerCamelCase ) as f:
lowercase_ :Dict = f.readline()
lowercase_ :List[Any] = [word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
lowercase_ :str = [
word
    for word in [sum(ord(x) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(__lowerCamelCase )
if __name__ == "__main__":
print(solution())
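# Hedged sketch (illustrative, not from the original snippet): the word-value
# rule above in isolation (A=1 ... Z=26, i.e. ord(letter) - 64). "SKY" gives
# 19 + 11 + 25 = 55, the 10th triangular number, so "SKY" is a triangle word.
def word_value(word: str) -> int:
    return sum(ord(c) - 64 for c in word.upper())

# word_value("SKY") == 55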
| 147 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
lowerCAmelCase : Tuple =logging.get_logger(__name__)
def UpperCAmelCase_ ( __lowerCamelCase : Union[tf.Tensor, np.ndarray] ):
if isinstance(__lowerCamelCase ,np.ndarray ):
return list(tensor.shape )
lowercase_ :Optional[int] = tf.shape(__lowerCamelCase )
if tensor.shape == tf.TensorShape(__lowerCamelCase ):
return dynamic
lowercase_ :Union[str, Any] = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(__lowerCamelCase )]
def UpperCAmelCase_ ( __lowerCamelCase : tf.Tensor ,__lowerCamelCase : Optional[int] = None ,__lowerCamelCase : Optional[str] = None ):
return tf.nn.softmax(logits=logits + 1e-9 ,axis=__lowerCamelCase ,name=__lowerCamelCase )
def UpperCAmelCase_ ( __lowerCamelCase : str ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Any ,__lowerCamelCase : List[str]=1e-5 ,__lowerCamelCase : List[str]=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__lowerCamelCase ,__lowerCamelCase ):
raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." )
# Get mean and variance on the axis to be normalized
lowercase_ , lowercase_ :List[str] = tf.nn.moments(__lowerCamelCase ,axes=[axis] ,keepdims=__lowerCamelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowercase_ :Union[str, Any] = [1] * inputs.shape.rank
lowercase_ :Optional[Any] = shape_list(__lowerCamelCase )[axis]
lowercase_ :List[str] = tf.reshape(__lowerCamelCase ,__lowerCamelCase )
lowercase_ :Dict = tf.reshape(__lowerCamelCase ,__lowerCamelCase )
# Compute layer normalization using the batch_normalization
# function.
lowercase_ :Union[str, Any] = tf.nn.batch_normalization(
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,offset=__lowerCamelCase ,scale=__lowerCamelCase ,variance_epsilon=__lowerCamelCase ,)
return outputs
def UpperCAmelCase_ ( __lowerCamelCase : Optional[int] ,__lowerCamelCase : Union[str, Any]=0 ,__lowerCamelCase : Dict=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowercase_ :Optional[int] = tf.shape(__lowerCamelCase )
lowercase_ :Optional[int] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowercase_ :List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] ,axis=0 )
return tf.reshape(__lowerCamelCase ,__lowerCamelCase )
def UpperCAmelCase_ ( __lowerCamelCase : tf.Tensor ):
if not isinstance(__lowerCamelCase ,tf.Tensor ):
lowercase_ :str = tf.convert_to_tensor(__lowerCamelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowercase_ :List[Any] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowercase_ :Optional[int] = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowercase_ :str = (
tf.cast(1 ,encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def UpperCAmelCase_ ( __lowerCamelCase : tf.Tensor ,__lowerCamelCase : int ,__lowerCamelCase : str = "input_ids" ):
tf.debugging.assert_less(
__lowerCamelCase ,tf.cast(__lowerCamelCase ,dtype=tensor.dtype ) ,message=(
F'The maximum value of {tensor_name} ({tf.math.reduce_max(__lowerCamelCase )}) must be smaller than the embedding '
F'layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'
) ,)
def UpperCAmelCase_ ( __lowerCamelCase : List[str] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Dict ):
lowercase_ :int = 6_45_12
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowercase_ :Union[str, Any] = [x for x in data if len(__lowerCamelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"The following attributes cannot be saved to HDF5 file because "
F'they are larger than {HDF5_OBJECT_HEADER_LIMIT} '
F'bytes: {bad_attributes}' )
lowercase_ :Union[str, Any] = np.asarray(__lowerCamelCase )
lowercase_ :Optional[int] = 1
lowercase_ :int = np.array_split(__lowerCamelCase ,__lowerCamelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
lowercase_ :List[Any] = np.array_split(__lowerCamelCase ,__lowerCamelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(__lowerCamelCase ):
lowercase_ :int = chunk_data
else:
lowercase_ :Tuple = data
def UpperCAmelCase_ ( __lowerCamelCase : str ,__lowerCamelCase : Tuple ):
if name in group.attrs:
lowercase_ :Optional[Any] = [n.decode("utf8" ) if hasattr(__lowerCamelCase ,"decode" ) else n for n in group.attrs[name]]
else:
lowercase_ :List[str] = []
lowercase_ :str = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("utf8" ) if hasattr(__lowerCamelCase ,"decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
chunk_id += 1
return data
def UpperCAmelCase_ ( __lowerCamelCase : str ):
def _expand_single_ad_tensor(__lowerCamelCase : Tuple ):
if isinstance(__lowerCamelCase ,tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(__lowerCamelCase ,axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor ,__lowerCamelCase )
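# Hedged sketch (illustrative, not from the original snippet): the
# static/dynamic shape merge that the shape-list helper at the top of this
# block performs -- keep known static dims, fall back to the dynamic
# tf.shape() entry wherever the static dim is None.
import tensorflow as tf

def mixed_shape(t: tf.Tensor) -> list:
    dynamic = tf.shape(t)
    static = t.shape.as_list()
    return [dynamic[i] if dim is None else dim for i, dim in enumerate(static)]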
| 147 | 1 |
"""simple docstring"""
import functools
from typing import Any
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : list[str] ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ) or len(snake_case__ ) == 0:
raise ValueError("""the string should be not empty string""" )
if not isinstance(snake_case__ , snake_case__ ) or not all(
isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) > 0 for item in words ):
raise ValueError("""the words should be a list of non-empty strings""" )
# Build trie
_snake_case : dict[str, Any] = {}
_snake_case : str = """WORD_KEEPER"""
for word in words:
_snake_case : int = trie
for c in word:
if c not in trie_node:
_snake_case : str = {}
_snake_case : int = trie_node[c]
_snake_case : str = True
_snake_case : Optional[int] = len(snake_case__ )
# Dynamic programming method
@functools.cache
def is_breakable(snake_case__ : int ) -> bool:
if index == len_string:
return True
_snake_case : Any = trie
for i in range(snake_case__ , snake_case__ ):
_snake_case : int = trie_node.get(string[i] , snake_case__ )
if trie_node is None:
return False
if trie_node.get(snake_case__ , snake_case__ ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
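# Hedged usage note (illustrative, not from the original snippet) for the
# trie + memoised word-break DP above; the mangled def name stands in for
# what was presumably `word_break`:
# word_break("applepenapple", ["apple", "pen"]) -> True
# word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) -> False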
| 64 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
A_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(__a )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "rag"
lowercase__ = True
def __init__( self: Union[str, Any], a_: int=None, a_: Tuple=True, a_: Optional[int]=None, a_: List[str]=None, a_: int=None, a_: Optional[Any]=None, a_: List[str]=None, a_: Optional[Any]=" / ", a_: Tuple=" // ", a_: List[Any]=5, a_: Dict=300, a_: Tuple=768, a_: Optional[Any]=8, a_: int="wiki_dpr", a_: Any="train", a_: Optional[int]="compressed", a_: Optional[int]=None, a_: List[Any]=None, a_: Optional[Any]=False, a_: str=False, a_: Dict=0.0, a_: Union[str, Any]=True, a_: Union[str, Any]=False, a_: str=False, a_: List[str]=False, a_: Union[str, Any]=True, a_: Any=None, **a_: List[Any], ):
'''simple docstring'''
super().__init__(
bos_token_id=a_, pad_token_id=a_, eos_token_id=a_, decoder_start_token_id=a_, forced_eos_token_id=a_, is_encoder_decoder=a_, prefix=a_, vocab_size=a_, **a_, )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_snake_case : Union[str, Any] = kwargs.pop("""question_encoder""" )
_snake_case : List[str] = question_encoder_config.pop("""model_type""" )
_snake_case : Union[str, Any] = kwargs.pop("""generator""" )
_snake_case : Any = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
_snake_case : Union[str, Any] = AutoConfig.for_model(a_, **a_ )
_snake_case : Optional[Any] = AutoConfig.for_model(a_, **a_ )
_snake_case : Any = reduce_loss
_snake_case : Optional[int] = label_smoothing
_snake_case : Dict = exclude_bos_score
_snake_case : int = do_marginalize
_snake_case : Optional[Any] = title_sep
_snake_case : Any = doc_sep
_snake_case : List[str] = n_docs
_snake_case : Tuple = max_combined_length
_snake_case : Optional[Any] = dataset
_snake_case : Union[str, Any] = dataset_split
_snake_case : Tuple = index_name
_snake_case : Any = retrieval_vector_size
_snake_case : Union[str, Any] = retrieval_batch_size
_snake_case : str = passages_path
_snake_case : Tuple = index_path
_snake_case : List[Any] = use_dummy_dataset
_snake_case : Optional[Any] = output_retrieved
_snake_case : Tuple = do_deduplication
_snake_case : Union[str, Any] = use_cache
if self.forced_eos_token_id is None:
_snake_case : Dict = getattr(self.generator, """forced_eos_token_id""", a_ )
@classmethod
def UpperCamelCase_ ( cls: Any, a_: PretrainedConfig, a_: PretrainedConfig, **a_: Optional[Any] ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = copy.deepcopy(self.__dict__ )
_snake_case : List[str] = self.question_encoder.to_dict()
_snake_case : Tuple = self.generator.to_dict()
_snake_case : Dict = self.__class__.model_type
return output
| 64 | 1 |
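Taken together, the docstring and class above define a composite configuration: a `rag` config nesting a question-encoder config and a generator config. A hedged usage sketch, assuming the de-obfuscated class is `transformers.RagConfig` (checkpoint names are illustrative):

from transformers import AutoConfig, RagConfig

question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large")

rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config,
    generator_config,
    n_docs=5,                   # documents retrieved per query
    retrieval_vector_size=768,  # dimensionality of the document embeddings
)
print(rag_config.model_type)  # "rag"

The `do_marginalize` option described above sums document-conditioned token log-probabilities over the retrieved documents; a minimal sketch of that computation (tensor names and shapes are assumptions):

import torch
import torch.nn.functional as F

def marginalize(seq_logits, doc_scores, n_docs=5):
    # seq_logits: (batch * n_docs, seq_len, vocab); doc_scores: (batch, n_docs)
    seq_logprobs = F.log_softmax(seq_logits, dim=-1).view(
        seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1)
    )
    doc_logprobs = F.log_softmax(doc_scores, dim=1)
    # add each document's score to all its token positions, then sum out the doc axis
    return torch.logsumexp(seq_logprobs + doc_logprobs.unsqueeze(-1).unsqueeze(-1), dim=1)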
"""simple docstring"""
from math import factorial
def solution( a_ :int = 1_00) -> int:
    return sum(int(x) for x in str(factorial(a_)))
if __name__ == "__main__":
    print(solution(int(input('''Enter the Number: ''').strip())))
| 361 |
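A quick numerical sanity check for the snippet above; both values are easy to confirm (10! = 3628800 by hand, and 648 is the well-known digit sum of 100!):

from math import factorial

# 10! = 3628800 -> 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27
assert sum(int(x) for x in str(factorial(10))) == 27
# The digit sum of 100!, the snippet's default input, is 648.
assert sum(int(x) for x in str(factorial(100))) == 648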
"""simple docstring"""
def __A ( edge :float) -> float:
    if not isinstance(edge , (int, float)) or edge <= 0:
        raise ValueError('''Length must be a positive number.''')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def __A ( edge :float) -> float:
    if not isinstance(edge , (int, float)) or edge <= 0:
        raise ValueError('''Length must be a positive number.''')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 188 | 0 |
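For reference, the closed forms implemented above for a regular dodecahedron with edge a are A = 3 * sqrt(25 + 10 * sqrt(5)) * a^2 ≈ 20.6457 * a^2 and V = ((15 + 7 * sqrt(5)) / 4) * a^3 ≈ 7.6631 * a^3. A short check with hypothetical helper names:

from math import sqrt

def surface_area(edge: float) -> float:
    # A = 3 * sqrt(25 + 10 * sqrt(5)) * edge^2
    return 3 * sqrt(25 + 10 * sqrt(5)) * edge**2

def volume(edge: float) -> float:
    # V = (15 + 7 * sqrt(5)) / 4 * edge^3
    return (15 + 7 * sqrt(5)) / 4 * edge**3

assert abs(surface_area(1) - 20.645728807) < 1e-6
assert abs(volume(1) - 7.663118961) < 1e-6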
'''simple docstring'''
__a = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def __UpperCAmelCase ( ):
_UpperCAmelCase : Optional[int] = input("Enter message: " )
_UpperCAmelCase : str = input("Enter key [alphanumeric]: " )
_UpperCAmelCase : Optional[int] = input("Encrypt/Decrypt [e/d]: " )
if mode.lower().startswith("e" ):
_UpperCAmelCase : str = '''encrypt'''
_UpperCAmelCase : List[Any] = encrypt_message(__a, __a )
elif mode.lower().startswith("d" ):
_UpperCAmelCase : Dict = '''decrypt'''
_UpperCAmelCase : Union[str, Any] = decrypt_message(__a, __a )
print(f"""\n{mode.title()}ed message:""" )
print(__a )
def __UpperCAmelCase ( a_: Union[str, Any], a_: List[Any] ):
return translate_message(__a, __a, "encrypt" )
def __UpperCAmelCase ( a_: Dict, a_: Dict ):
return translate_message(__a, __a, "decrypt" )
def __UpperCAmelCase ( a_: List[str], a_: List[str], a_: List[Any] ):
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : Tuple = key.upper()
for symbol in message:
_UpperCAmelCase : List[Any] = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__a )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__a ):
_UpperCAmelCase : Dict = 0
else:
translated.append(__a )
return "".join(__a )
if __name__ == "__main__":
    main()
| 145 |
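A condensed, runnable restatement of the cipher above with a round-trip check (the function name is hypothetical; as in the loop above, the key index only advances past letters and non-letters pass through unchanged):

LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

def vigenere(message: str, key: str, mode: str) -> str:
    translated, key_index, key = [], 0, key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            shift = LETTERS.find(key[key_index])
            num = (num + shift) % 26 if mode == "encrypt" else (num - shift) % 26
            translated.append(LETTERS[num] if symbol.isupper() else LETTERS[num].lower())
            key_index = (key_index + 1) % len(key)  # advance the key on letters only
        else:
            translated.append(symbol)  # pass punctuation and spaces through
    return "".join(translated)

plain = "Attack at dawn!"
assert vigenere(vigenere(plain, "LEMON", "encrypt"), "LEMON", "decrypt") == plain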
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowercase_))
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowercase_))
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowercase_))
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowercase_))
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(lowercase_))
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
SCREAMING_SNAKE_CASE_ : Any = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_))
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
SCREAMING_SNAKE_CASE_ : Dict = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_))
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
SCREAMING_SNAKE_CASE_ : Any = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
SCREAMING_SNAKE_CASE_ : List[Any] = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_))
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
SCREAMING_SNAKE_CASE_ : Any = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_))
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
SCREAMING_SNAKE_CASE_ : List[Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
SCREAMING_SNAKE_CASE_ : str = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_))
| 91 | 0 |
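The rule these tests pin down: for the requested variant (if any), every PyTorch `.bin` weight must have a safetensors counterpart. A hedged re-implementation inferred from the assertions above, not diffusers' exact code:

import os

def is_safetensors_compatible(filenames, variant=None):
    suffix = f".{variant}" if variant is not None else ""
    pt_files = [f for f in filenames if f.endswith(f"{suffix}.bin")]
    for pt_file in pt_files:
        folder, name = os.path.split(pt_file)
        if name == f"pytorch_model{suffix}.bin":
            # transformers components pair pytorch_model.bin with model.safetensors
            sf_file = os.path.join(folder, f"model{suffix}.safetensors")
        else:
            # diffusers components keep the stem and swap only the extension
            sf_file = pt_file[: -len(".bin")] + ".safetensors"
        if sf_file not in filenames:
            return False
    return True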
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def _lowercase ( self : Dict , __A : Optional[Any]=0 ):
snake_case__ : Union[str, Any] = np.random.RandomState(__A )
snake_case__ : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _lowercase ( self : Any ):
snake_case__ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : Union[str, Any] = self.get_dummy_inputs()
snake_case__ : int = pipe(**__A ).images
snake_case__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
snake_case__ : Optional[int] = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : Optional[int] ):
snake_case__ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case__ : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__A )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : List[Any] = self.get_dummy_inputs()
snake_case__ : str = pipe(**__A ).images
snake_case__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
snake_case__ : List[Any] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : List[Any] ):
snake_case__ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case__ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : Dict = self.get_dummy_inputs()
snake_case__ : Any = pipe(**__A ).images
snake_case__ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
snake_case__ : Tuple = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : List[str] ):
snake_case__ : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case__ : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : int = self.get_dummy_inputs()
snake_case__ : Tuple = pipe(**__A ).images
snake_case__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
snake_case__ : Any = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : Dict ):
snake_case__ : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case__ : List[str] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : Optional[Any] = self.get_dummy_inputs()
snake_case__ : Dict = pipe(**__A ).images
snake_case__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
snake_case__ : List[str] = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : int ):
snake_case__ : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case__ : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : Tuple = self.get_dummy_inputs()
snake_case__ : List[str] = pipe(**__A ).images
snake_case__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
snake_case__ : Union[str, Any] = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : Any ):
snake_case__ : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : Union[str, Any] = self.get_dummy_inputs()
snake_case__ : List[Any] = 3 * [inputs["prompt"]]
# forward
snake_case__ : Tuple = pipe(**__A )
snake_case__ : str = output.images[0, -3:, -3:, -1]
snake_case__ : Optional[Any] = self.get_dummy_inputs()
snake_case__ : Any = 3 * [inputs.pop("prompt" )]
snake_case__ : List[str] = pipe.tokenizer(
__A , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=__A , return_tensors="np" , )
snake_case__ : str = text_inputs["input_ids"]
snake_case__ : List[Any] = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
snake_case__ : Union[str, Any] = prompt_embeds
# forward
snake_case__ : Any = pipe(**__A )
snake_case__ : List[str] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def _lowercase ( self : Tuple ):
snake_case__ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : Optional[int] = self.get_dummy_inputs()
snake_case__ : List[str] = 3 * ["this is a negative prompt"]
snake_case__ : List[Any] = negative_prompt
snake_case__ : Union[str, Any] = 3 * [inputs["prompt"]]
# forward
snake_case__ : Optional[int] = pipe(**__A )
snake_case__ : List[str] = output.images[0, -3:, -3:, -1]
snake_case__ : str = self.get_dummy_inputs()
snake_case__ : str = 3 * [inputs.pop("prompt" )]
snake_case__ : Optional[int] = []
for p in [prompt, negative_prompt]:
snake_case__ : int = pipe.tokenizer(
__A , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=__A , return_tensors="np" , )
snake_case__ : int = text_inputs["input_ids"]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
snake_case__ : str = embeds
# forward
snake_case__ : Any = pipe(**__A )
snake_case__ : str = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@property
def _lowercase ( self : Any ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowercase ( self : Dict ):
snake_case__ : int = ort.SessionOptions()
snake_case__ : Union[str, Any] = False
return options
def _lowercase ( self : Tuple ):
# using the PNDM scheduler by default
snake_case__ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : List[Any] = "A painting of a squirrel eating a burger"
np.random.seed(0 )
snake_case__ : List[str] = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=1_0 , output_type="np" )
snake_case__ : str = output.images
snake_case__ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : str = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : List[Any] ):
snake_case__ : Tuple = DDIMScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
snake_case__ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__A , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : List[str] = "open neural network exchange"
snake_case__ : int = np.random.RandomState(0 )
snake_case__ : str = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__A , output_type="np" )
snake_case__ : Optional[int] = output.images
snake_case__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Optional[int] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : str ):
snake_case__ : Dict = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
snake_case__ : Any = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__A , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : Optional[Any] = "open neural network exchange"
snake_case__ : int = np.random.RandomState(0 )
snake_case__ : List[str] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__A , output_type="np" )
snake_case__ : int = output.images
snake_case__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Dict = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : List[str] ):
snake_case__ : Union[str, Any] = 0
def test_callback_fn(__A : int , __A : int , __A : np.ndarray ) -> None:
snake_case__ : List[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 6_4, 6_4)
snake_case__ : List[Any] = latents[0, -3:, -3:, -1]
snake_case__ : int = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 6_4, 6_4)
snake_case__ : Union[str, Any] = latents[0, -3:, -3:, -1]
snake_case__ : List[str] = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
snake_case__ : Dict = False
snake_case__ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : str = "Andromeda galaxy in a bottle"
snake_case__ : Union[str, Any] = np.random.RandomState(0 )
pipe(
prompt=__A , num_inference_steps=5 , guidance_scale=7.5 , generator=__A , callback=__A , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _lowercase ( self : List[Any] ):
snake_case__ : Dict = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(__A , __A )
assert pipe.safety_checker is None
snake_case__ : Union[str, Any] = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__A )
snake_case__ : List[str] = OnnxStableDiffusionPipeline.from_pretrained(__A )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
snake_case__ : Tuple = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
| 360 |
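Hedged sketch of the pattern each scheduler test above repeats: load the tiny ONNX checkpoint named in the test class on CPU, optionally swap the scheduler, run two inference steps, and compare a 3x3 corner slice of the output against a reference array:

import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
    provider="CPUExecutionProvider",
)
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)

image = pipe(
    prompt="A painting of a squirrel eating a burger",
    generator=np.random.RandomState(0),  # NumPy RNG, since the pipeline runs on ONNX Runtime
    num_inference_steps=2,
    guidance_scale=7.5,
    output_type="numpy",
).images[0]
image_slice = image[-3:, -3:, -1]  # the corner the tests compare to expected_slice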
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : str = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "roberta-prelayernorm"
def __init__( self : Tuple , __A : Any=5_0_2_6_5 , __A : Optional[int]=7_6_8 , __A : Dict=1_2 , __A : Union[str, Any]=1_2 , __A : List[Any]=3_0_7_2 , __A : Optional[Any]="gelu" , __A : Optional[int]=0.1 , __A : Tuple=0.1 , __A : Optional[Any]=5_1_2 , __A : List[str]=2 , __A : Optional[int]=0.0_2 , __A : Tuple=1e-1_2 , __A : Any=1 , __A : str=0 , __A : int=2 , __A : List[str]="absolute" , __A : Optional[Any]=True , __A : List[Any]=None , **__A : Optional[Any] , ):
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
snake_case__ : Tuple = vocab_size
snake_case__ : Optional[Any] = hidden_size
snake_case__ : List[Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Union[str, Any] = intermediate_size
snake_case__ : List[Any] = hidden_dropout_prob
snake_case__ : Any = attention_probs_dropout_prob
snake_case__ : int = max_position_embeddings
snake_case__ : Tuple = type_vocab_size
snake_case__ : Optional[int] = initializer_range
snake_case__ : int = layer_norm_eps
snake_case__ : Dict = position_embedding_type
snake_case__ : int = use_cache
snake_case__ : Dict = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
@property
def _lowercase ( self : Optional[int] ):
if self.task == "multiple-choice":
snake_case__ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case__ : Tuple = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 286 | 0 |
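A hedged usage sketch for the configuration above, assuming the de-obfuscated class is `transformers.RobertaPreLayerNormConfig` (the `"roberta-prelayernorm"` model type and the checkpoint mapping match):

from transformers import RobertaPreLayerNormConfig

config = RobertaPreLayerNormConfig(
    vocab_size=50_265,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    position_embedding_type="absolute",
)
print(config.model_type)  # "roberta-prelayernorm"
# For ONNX export, the inputs property above maps axis 0 to "batch" and axis 1
# to "sequence"; for multiple-choice tasks, axis 1 is "choice" and axis 2 "sequence".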