| code (string, 86-54.5k chars) | code_codestyle (int64, 0-371) | style_context (string, 87-49.2k chars) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
import string


def atbash_slow(sequence: str) -> str:
    """
    Encode/decode a string with the Atbash cipher by mirroring each letter's
    character code.

    >>> atbash_slow("ABCDEFG")
    'ZYXWVUT'
    """
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:  # uppercase A-Z
            output += chr(155 - extract)
        elif 97 <= extract <= 122:  # lowercase a-z
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    """
    Encode/decode a string with the Atbash cipher using a reversed alphabet lookup.

    >>> atbash("aW;;123BX")
    'zD;;123YC'
    """
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Compare the runtime of both implementations."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
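# Why 155 and 219, for reference: uppercase codes span 65..90 and 65 + 90 = 155,
# so chr(155 - extract) maps 'A' <-> 'Z'; lowercase codes span 97..122 and
# 97 + 122 = 219, which mirrors 'a' <-> 'z' the same way.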
| 333 |
def encrypt(input_string: str, key: int) -> str:
    """
    Shuffles the characters of a string by placing each of them in a grid (the
    height depends on the key) in a zigzag formation and reading it left to right.

    >>> encrypt("Hello World", 4)
    'HWe olordll'
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """
    Generates a template based on the key, fills it in with the characters of the
    input string, and then reads it in a zigzag formation.

    >>> decrypt("HWe olordll", 4)
    'Hello World'
    """
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(row)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key.

    >>> bruteforce("HWe olordll")[4]
    'Hello World'
    """
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
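# How the zigzag index works, for reference: with key = 4 the cycle length is
# lowest * 2 = 6, so position % 6 walks 0,1,2,3,4,5 and min(num, 6 - num) folds
# that into 0,1,2,3,2,1 - down the rails and back up, which is exactly the
# rail fence pattern.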
| 333 | 1 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels."""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    """Accuracy and F1 score, used for MRPC and QQP."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlations, used for STS-B."""
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
| 62 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
UpperCAmelCase_ : str = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
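# Example invocation (script name and paths are placeholders, not part of this file):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention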
| 62 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
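# Hypothetical usage sketch (the agents framework normally instantiates tools itself;
# call arguments are forwarded to `encode`):
#   classifier = TextClassificationTool()
#   classifier("This is a super nice API!", labels=["positive", "negative"])  # -> "positive"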
| 69 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # extend the attention mask with zeros up to the cache length
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
        tgt_text = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 60 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """
    Fetch GitHub info of a user using the requests module.
    """
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
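# Example (assumed output shape): with a valid token, fetch_github_info(USER_TOKEN)
# returns the authenticated user's profile as a dict with keys such as "login",
# "id" and "name", per the GitHub REST API /user endpoint linked above.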
| 370 |
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Creates a set of `DataLoader`s for the `glue` dataset, using a given fold of the training data."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
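# Example launch command (script name assumed; works on any accelerate-supported setup):
#   accelerate launch cross_validation.py --num_folds 3 --mixed_precision fp16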
| 68 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
    layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
    """

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
            padding=padding, bias=bias, dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.
    """

    def __init__(self, pool_scales, in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """
    Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
    [UPerNet](https://arxiv.org/abs/1807.10221).
    """

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    """
    Fully Convolution Networks for Semantic Segmentation. This head is the implementation of
    [FCNNet](https://arxiv.org/abs/1411.4038).
    """

    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: int = 1) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights."""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UPERNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
def _a ( self , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , ) -> Union[tuple, SemanticSegmenterOutput]:
__UpperCamelCase =return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase =output_attentions if output_attentions is not None else self.config.output_attentions
__UpperCamelCase =self.backbone.forward_with_filtered_kwargs(
__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , output_attentions=__lowerCAmelCase )
__UpperCamelCase =outputs.feature_maps
__UpperCamelCase =self.decode_head(__lowerCAmelCase )
__UpperCamelCase =nn.functional.interpolate(__lowerCAmelCase , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=__lowerCAmelCase )
__UpperCamelCase =None
if self.auxiliary_head is not None:
__UpperCamelCase =self.auxiliary_head(__lowerCAmelCase )
__UpperCamelCase =nn.functional.interpolate(
__lowerCAmelCase , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=__lowerCAmelCase )
__UpperCamelCase =None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one' )
else:
# compute weighted loss
__UpperCamelCase =CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
__UpperCamelCase =loss_fct(__lowerCAmelCase , __lowerCAmelCase )
__UpperCamelCase =loss_fct(__lowerCAmelCase , __lowerCAmelCase )
__UpperCamelCase =main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
__UpperCamelCase =(logits,) + outputs[1:]
else:
__UpperCamelCase =(logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=__lowerCAmelCase , logits=__lowerCAmelCase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
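# Usage sketch (not part of the original file; hedged): it assumes this class is exported as
# `UperNetForSemanticSegmentation` and that a Hub checkpoint such as
# "openmmlab/upernet-convnext-tiny" is available; both names are illustrative here.
#
#     from PIL import Image
#     from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#     processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#     inputs = processor(images=Image.open("scene.jpg"), return_tensors="pt")
#     logits = model(**inputs).logits  # (batch_size, num_labels, height, width), upsampled to the input size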
| 62 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/m2m100_418M''': 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs, ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs, )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)
        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        """simple docstring"""
        return len(self.encoder) + len(self.lang_token_to_id)
    @property
    def src_lang(self) -> str:
        """simple docstring"""
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """simple docstring"""
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])
    def _convert_id_to_token(self, index: int) -> str:
        """simple docstring"""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab(self) -> Dict:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d: Dict) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f'{save_directory} should be a directory')
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs, ) -> BatchEncoding:
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def _switch_to_input_mode(self):
        """simple docstring"""
        self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        """simple docstring"""
        self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """simple docstring"""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """simple docstring"""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token(self, lang: str) -> str:
        """simple docstring"""
        return self.lang_code_to_token[lang]
    def get_lang_id(self, lang: str) -> int:
        """simple docstring"""
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    """simple docstring"""
    with open(path, """r""") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    """simple docstring"""
    with open(path, """w""") as f:
        json.dump(data, f, indent=2)
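# Usage sketch (not part of the original file; hedged): the checkpoint name comes from the URL map
# above, the rest is illustrative. Translation input preparation typically looks like:
#
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ro")
#     model_inputs = tokenizer("Hello world", return_tensors="pt")
#     # model_inputs.input_ids starts with the __en__ language token and ends with </s>,
#     # matching the prefix_tokens/suffix_tokens installed by set_src_lang_special_tokens above.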
| 274 | 0 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    batch_size = np.inf
    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)
    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
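# Worked example (illustrative, not from the original file): for
# Features({"image": Image(), "label": Value("int64")}) the visitor above matches the Image
# feature, so the writer batch size is capped at config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS;
# for purely scalar features nothing matches, batch_size stays at np.inf, and None is returned,
# meaning "fall back to the default batch size".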
class ParquetDatasetReader(AbstractDatasetReader):
    """simple docstring"""
    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, **kwargs, ) -> None:
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['parquet'][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs, )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter:
    """simple docstring"""
    def __init__(self, dataset, path_or_buf, batch_size=None, **parquet_writer_kwargs, ) -> None:
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, 'wb+') as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written
    def _write(self, file_obj, batch_size, **parquet_writer_kwargs) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop('path_or_buf', None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating parquet from Arrow format', ):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None, )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
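# Round-trip sketch (not part of the original file; hedged), using only the classes defined above:
#
#     ParquetDatasetWriter(ds, "out.parquet").write()              # Arrow batches -> Parquet row groups
#     ds2 = ParquetDatasetReader("out.parquet", cache_dir="cache").read()
#
# The writer picks its row-group batch size from get_writer_batch_size(ds.features) unless one is
# passed explicitly; "ds", "out.parquet", and "cache" are illustrative.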
| 117 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "realm"
    def __init__(self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12, num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=1E-3, reader_beam_size=5, reader_seq_len=320, num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
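# Configuration sketch (not part of the original file; hedged): instantiating with defaults and
# overriding one retrieval hyper-parameter; values echo the defaults defined above.
#
#     config = RealmConfig(searcher_beam_size=1000)
#     print(config.hidden_size, config.num_block_records)  # 768 13353718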
| 117 | 1 |
'''simple docstring'''
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    '''simple docstring'''
    if days_between_payments <= 0:
        raise ValueError("""days_between_payments must be > 0""")
    if daily_interest_rate < 0:
        raise ValueError("""daily_interest_rate must be >= 0""")
    if principal <= 0:
        raise ValueError("""principal must be > 0""")
    return principal * daily_interest_rate * days_between_payments
def compound_interest(principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float, ) -> float:
    '''simple docstring'''
    if number_of_compounding_periods <= 0:
        raise ValueError("""number_of_compounding_periods must be > 0""")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("""nominal_annual_interest_rate_percentage must be >= 0""")
    if principal <= 0:
        raise ValueError("""principal must be > 0""")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: float, ) -> float:
    '''simple docstring'''
    if number_of_years <= 0:
        raise ValueError("""number_of_years must be > 0""")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("""nominal_annual_percentage_rate must be >= 0""")
    if principal <= 0:
        raise ValueError("""principal must be > 0""")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365 )
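# Worked example (illustrative numbers, not from the original file):
#     simple_interest(500, 0.055, 15)    == 500 * 0.055 * 15             == 412.5
#     compound_interest(10_000, 0.05, 3) == 10_000 * ((1 + 0.05)**3 - 1) == 1576.25
# apr_interest simply reuses compound_interest with a daily rate (APR / 365)
# over number_of_years * 365 compounding periods.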
if __name__ == "__main__":
import doctest
doctest.testmod()
| 272 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE :Union[str, Any] = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :List[Any] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
SCREAMING_SNAKE_CASE :List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 159 | 0 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> Optional[Any]:
UpperCAmelCase_: Tuple = True
UpperCAmelCase_: List[Any] = OpenLlamaModel(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: Any = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, encoder_attention_mask=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: Optional[int] = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: str = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> List[Any]:
UpperCAmelCase_: Any = OpenLlamaForCausalLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: Union[str, Any] = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> Any:
UpperCAmelCase_: Tuple = True
UpperCAmelCase_: Optional[int] = True
UpperCAmelCase_: Dict = OpenLlamaForCausalLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
# first forward pass
UpperCAmelCase_: str = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, encoder_attention_mask=SCREAMING_SNAKE_CASE_, use_cache=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: Tuple = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase_: Tuple = ids_tensor((self.batch_size, 3), config.vocab_size )
UpperCAmelCase_: Optional[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
UpperCAmelCase_: str = torch.cat([input_ids, next_tokens], dim=-1 )
UpperCAmelCase_: str = torch.cat([input_mask, next_mask], dim=-1 )
UpperCAmelCase_: Dict = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, encoder_attention_mask=SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_, )["""hidden_states"""][0]
UpperCAmelCase_: Tuple = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, encoder_attention_mask=SCREAMING_SNAKE_CASE_, past_key_values=SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_, )["""hidden_states"""][0]
# select random slice
UpperCAmelCase_: str = ids_tensor((1,), output_from_past.shape[-1] ).item()
UpperCAmelCase_: str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_: Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''feature-extraction''': OpenLlamaModel,
            '''text-classification''': OpenLlamaForSequenceClassification,
            '''text-generation''': OpenLlamaForCausalLM,
            '''zero-shot''': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """single_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """multi_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("""linear""",), ("""dynamic""",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"""type""": scaling_type, """factor""": 1_0.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1E-5))
| 82 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
a : Any = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
a : int = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
a : List[Any] = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    """simple docstring"""
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            """Number of removed nested coreferring mentions in the key """
            F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
        logger.info(
            """Number of resulting singleton clusters in the key """
            F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
    if not keep_singletons:
        logger.info(
            F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
            """files, respectively""" )
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """simple docstring"""
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, fa = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa})
        logger.info(
            name.ljust(1_0), F'Recall: {recall * 1_0_0:.2f}', F' Precision: {precision * 1_0_0:.2f}', F' F1: {fa * 1_0_0:.2f}', )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 1_0_0
        logger.info(F'CoNLL score: {conll:.2f}')
        output_scores.update({"""conll_score""": conll})
    return output_scores
def check_gold_parse_annotation(key_lines):
    """simple docstring"""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("""#"""):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ), codebase_urls=["""https://github.com/ns-moosavi/coval"""], reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
], )
    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False) -> int:
        metrics = [
            ("""mentions""", evaluator.mentions),
            ("""muc""", evaluator.muc),
            ("""bcub""", evaluator.b_cubed),
            ("""ceafe""", evaluator.ceafe),
            ("""lea""", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span, )
        return score
| 82 | 1 |
from math import factorial
def solution(n: int = 20) -> int:
    """simple docstring"""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
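# Worked check (not from the original file): solution(n) computes the central binomial
# coefficient C(2n, n), i.e. the number of monotonic lattice paths through an n x n grid:
#     solution(2)  == C(4, 2)   == 6
#     solution(20) == C(40, 20) == 137846528820   # the classic Project Euler #15 answer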
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 32 |
from collections import deque
def tarjan(g):
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]
    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index
    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components
def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 19 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    '''simple docstring'''
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("""GET""", """https://huggingface.co""")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("""GET""", """https://huggingface.co""", timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    '''simple docstring'''
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("""GET""", """https://huggingface.co""")
def test_offline_with_datasets_offline_mode_enabled():
    '''simple docstring'''
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("""https://huggingface.co""")
| 366 |
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    '''simple docstring'''
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""")
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("""image_data/lena_brightness.png""", format="""png""")
| 193 | 0 |
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int], ) -> Any:
    '''simple docstring'''
    import pyspark
    def generate_fn():
        df_with_partition_id = df.select("""*""", pyspark.sql.functions.spark_partition_id().alias("""part_id"""))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("""*""").where(f"""part_id = {partition_id}""").drop("""part_id""")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"""{partition_id}_{row_id}""", row.asDict()
                row_id += 1
    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None, ) -> None:
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
    def __iter__(self):
        yield from self.generate_examples_fn()
    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs, ) -> None:
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs, )
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, """fs_test""" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, """a""")
            return [probe_file]
        if self._spark.conf.get("""spark.master""", """""").startswith("""local"""):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark
        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]})
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, """batch_bytes: long""")
            .agg(pyspark.sql.functions.sum("""batch_bytes""").alias("""sample_bytes"""))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator,
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(self, split_generator) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
| 84 |
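This builder is the machinery behind `Dataset.from_spark` in `datasets>=2.13`. A minimal sketch of how it is typically driven, assuming a local Spark session; the schema and values below are illustrative:

from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([(1, "hello"), (2, "world")], schema="id bigint, text string")

# With a local master, no shared cache_dir is needed (see _validate_cache_dir above).
ds = Dataset.from_spark(df)
print(ds[0])  # {'id': 1, 'text': 'hello'}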
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : List[str] = FlaxBertModel(_A )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : int = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A , repo_id='''test-model-flax''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def A ( self : str ) -> Tuple:
UpperCAmelCase_ : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : Optional[Any] = FlaxBertModel(_A )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
UpperCAmelCase_ : List[str] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_A , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def check_models_equal(model_1, model_2) -> bool:
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class snake_case__ ( unittest.TestCase):
def A ( self : Any ) -> Any:
UpperCAmelCase_ : Any = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Any = FlaxBertModel(_A )
UpperCAmelCase_ : Tuple = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) )
with self.assertRaises(_A ):
UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Tuple:
UpperCAmelCase_ : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Tuple = FlaxBertModel(_A )
UpperCAmelCase_ : str = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) , max_shard_size='''10KB''' )
with self.assertRaises(_A ):
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Optional[int]:
UpperCAmelCase_ : int = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
def A ( self : Any ) -> str:
UpperCAmelCase_ : Optional[Any] = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
| 304 | 0 |
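The staging tests above boil down to a push/pull round trip. A hedged sketch of that flow, assuming a valid Hub token is configured and using a hypothetical repo id:

from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import BertConfig, FlaxBertModel

config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
model = FlaxBertModel(config)

model.push_to_hub("my-user/tiny-flax-bert")  # hypothetical repo id; requires `huggingface-cli login`
restored = FlaxBertModel.from_pretrained("my-user/tiny-flax-bert")

# Same flattened-parameter comparison as check_models_equal above.
base_params = flatten_dict(unfreeze(model.params))
new_params = flatten_dict(unfreeze(restored.params))
assert all(float((base_params[k] - new_params[k]).sum()) < 1e-3 for k in base_params)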
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 351 |
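Outside the test harness, the same helpers are importable from `transformers.utils.backbone_utils`. A short sketch of the alignment behaviour the first test pins down (stage names below are illustrative):

from transformers.utils.backbone_utils import get_aligned_output_features_output_indices

stage_names = ["stem", "stage1", "stage2", "stage3"]

# Neither side given: defaults to the last stage.
print(get_aligned_output_features_output_indices(None, None, stage_names))
# (['stage3'], [3])

# Names given: indices are derived so the two views always agree.
print(get_aligned_output_features_output_indices(["stem", "stage2"], None, stage_names))
# (['stem', 'stage2'], [0, 2])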
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _lowercase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__A = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__A = False
__A = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=99 , lowerCamelCase_=32 , lowerCamelCase_=32 , lowerCamelCase_=2 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=512 , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , ):
"""simple docstring"""
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_labels
a = num_choices
a = scope
a = embedding_size
def UpperCamelCase_ (self ):
"""simple docstring"""
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = None
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a = ids_tensor([self.batch_size] , self.num_choices )
a = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = TFMobileBertModel(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
a = [input_ids, input_mask]
a = model(lowerCamelCase_ )
a = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = TFMobileBertForMaskedLM(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = TFMobileBertForNextSentencePrediction(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = TFMobileBertForPreTraining(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.num_labels
a = TFMobileBertForSequenceClassification(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.num_choices
a = TFMobileBertForMultipleChoice(config=lowerCamelCase_ )
a = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
a = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
a = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
a = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.num_labels
a = TFMobileBertForTokenClassification(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = TFMobileBertForQuestionAnswering(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
def UpperCamelCase_ (self ):
"""simple docstring"""
a = TFMobileBertModelTest.TFMobileBertModelTester(self )
a = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def UpperCamelCase_ (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
for model_name in ["google/mobilebert-uncased"]:
a = TFMobileBertModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_tf
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
a = tf.constant([[0, 1, 2, 3, 4, 5]] )
a = model(lowerCamelCase_ )[0]
a = [1, 6, 30522]
self.assertEqual(output.shape , lowerCamelCase_ )
a = tf.constant(
[
[
[-4.591_9547, -9.24_8295, -9.64_5256],
[-6.730_6175, -6.44_0284, -6.605_2837],
[-7.274_3506, -6.784_7915, -6.02_4673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 )
| 71 | 0 |
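The integration test above fixes the exact logits of the public checkpoint. Running the same check standalone looks like this (downloads the checkpoint on first use):

import tensorflow as tf
from transformers import TFMobileBertForPreTraining

model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
logits = model(input_ids)[0]
print(logits.shape)  # (1, 6, 30522): per-token scores over the vocabulary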
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Converts a string of roman numerals to an integer, honoring subtractive notation."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generates the minimal-form roman numeral for an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Returns the total character savings from rewriting each numeral in minimal form."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as file1:
        lines = file1.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
| 297 |
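The parsing half of the solution rests on one rule: a symbol that precedes a larger symbol is subtracted, everything else is added. A compact, self-contained restatement of that rule:

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}

def parse(numerals: str) -> int:
    total = 0
    # Pad with "I" so the final symbol compares against the smallest value and is always added.
    for cur, nxt in zip(numerals, numerals[1:] + "I"):
        total += -SYMBOLS[cur] if SYMBOLS[cur] < SYMBOLS[nxt] else SYMBOLS[cur]
    return total

assert parse("XIIIIII") == parse("XVI") == 16  # same value; the minimal form saves 4 characters
assert parse("MCMXC") == 1990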
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging

logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 297 | 1 |
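A short sketch of the config in use, showing the activation parsing and the `attribute_map` aliasing defined above:

from transformers import T5Config

config = T5Config(feed_forward_proj="gated-gelu")
print(config.is_gated_act)  # True
print(config.dense_act_fn)  # 'gelu_new' (the backwards-compatibility remap above)
print(config.hidden_size)   # 512: attribute_map forwards hidden_size to d_model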
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = "<pad>"
UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def snake_case_ (self ) -> Union[str, Any]:
UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(__a ) , 3_00_01 )
def snake_case_ (self ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
def snake_case_ (self ) -> Optional[Any]:
# fmt: off
UpperCamelCase = " \tHeLLo!how \n Are yoU? "
UpperCamelCase = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
UpperCamelCase = DebertaVaTokenizer(__a , do_lower_case=__a )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCamelCase = DebertaVaTokenizerFast(__a , do_lower_case=__a )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def snake_case_ (self ) -> Dict:
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def snake_case_ (self ) -> Tuple:
pass
def snake_case_ (self ) -> Union[str, Any]:
# fmt: off
UpperCamelCase = "I was born in 92000, and this is falsé."
UpperCamelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCamelCase = DebertaVaTokenizer(__a , split_by_punct=__a )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCamelCase = DebertaVaTokenizerFast(__a , split_by_punct=__a )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
def snake_case_ (self ) -> Union[str, Any]:
# fmt: off
UpperCamelCase = "I was born in 92000, and this is falsé."
UpperCamelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCamelCase = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCamelCase = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
def snake_case_ (self ) -> Union[str, Any]:
# fmt: off
UpperCamelCase = "I was born in 92000, and this is falsé."
UpperCamelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCamelCase = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCamelCase = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
def snake_case_ (self ) -> int:
# fmt: off
UpperCamelCase = "I was born in 92000, and this is falsé."
UpperCamelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCamelCase = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCamelCase = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
def snake_case_ (self ) -> int:
# fmt: off
UpperCamelCase = " \tHeLLo!how \n Are yoU? "
UpperCamelCase = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
UpperCamelCase = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCamelCase = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
def snake_case_ (self ) -> Union[str, Any]:
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = "I was born in 92000, and this is falsé."
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCamelCase = tokenizer.encode(__a , add_special_tokens=__a )
UpperCamelCase = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = tokenizer.encode(__a )
UpperCamelCase = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = "This is a test"
UpperCamelCase = [13, 1, 43_98, 25, 21, 12_89]
UpperCamelCase = ["▁", "T", "his", "▁is", "▁a", "▁test"]
UpperCamelCase = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
UpperCamelCase = DebertaVaTokenizer(__a , keep_accents=__a )
UpperCamelCase = DebertaVaTokenizerFast(__a , keep_accents=__a )
UpperCamelCase = tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCamelCase = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCamelCase = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(__a , __a )
UpperCamelCase = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCamelCase = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(__a , __a )
# fmt: off
UpperCamelCase = "I was born in 92000, and this is falsé."
UpperCamelCase = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
UpperCamelCase = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
UpperCamelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCamelCase = tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCamelCase = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCamelCase = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(__a , __a )
UpperCamelCase = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCamelCase = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(__a , __a )
def snake_case_ (self ) -> Any:
UpperCamelCase = DebertaVaTokenizer(__a )
UpperCamelCase = tokenizer.encode("sequence builders" )
UpperCamelCase = tokenizer.encode("multi-sequence build" )
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__a )
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__a , __a )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __a )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __a , )
@slow
def snake_case_ (self ) -> Dict:
# fmt: off
UpperCamelCase = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
| 244 |
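The slow/fast parity asserted throughout these tests can be reproduced against the public checkpoint. A sketch, assuming the checkpoint's tokenizer files can be downloaded and that the default tokenizer options are used:

from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast

slow = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
fast = DebertaV2TokenizerFast.from_pretrained("microsoft/deberta-v2-xlarge")

text = "I was born in 92000, and this is falsé."
assert slow.tokenize(text) == fast.tokenize(text)
assert slow.encode(text) == fast.encode(text)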
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
lowerCAmelCase__ = NewType('''DataClass''', Any)
lowerCAmelCase__ = NewType('''DataClassType''', Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list):
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases=None,
    help=None,
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
):
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types, **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
@staticmethod
def snake_case_ (__a , __a ) -> Optional[Any]:
UpperCamelCase = F"--{field.name}"
UpperCamelCase = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __a ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
UpperCamelCase = kwargs.pop("aliases" , [] )
if isinstance(__a , __a ):
UpperCamelCase = [aliases]
UpperCamelCase = getattr(field.type , "__origin__" , field.type )
if origin_type is Union or (hasattr(__a , "UnionType" ) and isinstance(__a , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__a ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
F" Problem encountered in field '{field.name}'." )
if type(__a ) not in field.type.__args__:
# filter `str` in Union
UpperCamelCase = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
UpperCamelCase = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
UpperCamelCase = (
field.type.__args__[0] if isinstance(__a , field.type.__args__[1] ) else field.type.__args__[1]
)
UpperCamelCase = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
UpperCamelCase = {}
if origin_type is Literal or (isinstance(field.type , __a ) and issubclass(field.type , __a )):
if origin_type is Literal:
UpperCamelCase = field.type.__args__
else:
UpperCamelCase = [x.value for x in field.type]
UpperCamelCase = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
UpperCamelCase = field.default
else:
UpperCamelCase = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
UpperCamelCase = copy(__a )
# Hack because type=bool in argparse does not behave as we want.
UpperCamelCase = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
UpperCamelCase = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
UpperCamelCase = default
# This tells argparse we accept 0 or 1 value after --field_name
UpperCamelCase = "?"
# This is the value that will get picked if we do --field_name (without value)
UpperCamelCase = True
elif isclass(__a ) and issubclass(__a , __a ):
UpperCamelCase = field.type.__args__[0]
UpperCamelCase = "+"
if field.default_factory is not dataclasses.MISSING:
UpperCamelCase = field.default_factory()
elif field.default is dataclasses.MISSING:
UpperCamelCase = True
else:
UpperCamelCase = field.type
if field.default is not dataclasses.MISSING:
UpperCamelCase = field.default
elif field.default_factory is not dataclasses.MISSING:
UpperCamelCase = field.default_factory()
else:
UpperCamelCase = True
parser.add_argument(__a , *__a , **__a )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
UpperCamelCase = False
parser.add_argument(F"--no_{field.name}" , action="store_false" , dest=field.name , **__a )
def snake_case_ (self , __a ) -> List[Any]:
if hasattr(__a , "_argument_group_name" ):
UpperCamelCase = self.add_argument_group(dtype._argument_group_name )
else:
UpperCamelCase = self
try:
UpperCamelCase = get_type_hints(__a )
except NameError:
raise RuntimeError(
F"Type resolution failed for {dtype}. Try declaring the class in global scope or "
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__a ):
UpperCamelCase = ".".join(map(__a , sys.version_info[:3] ) )
raise RuntimeError(
F"Type resolution failed for {dtype} on Python {python_version}. Try removing "
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(__a ):
if not field.init:
continue
UpperCamelCase = type_hints[field.name]
self._parse_dataclass_field(__a , __a )
def snake_case_ (self , __a=None , __a=False , __a=True , __a=None , __a=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
UpperCamelCase = []
if args_filename:
args_files.append(Path(__a ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
UpperCamelCase = ArgumentParser()
args_file_parser.add_argument(__a , type=__a , action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
UpperCamelCase , UpperCamelCase = args_file_parser.parse_known_args(args=__a )
UpperCamelCase = vars(__a ).get(args_file_flag.lstrip("-" ) , __a )
if cmd_args_file_paths:
args_files.extend([Path(__a ) for p in cmd_args_file_paths] )
UpperCamelCase = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
UpperCamelCase = file_args + args if args is not None else file_args + sys.argv[1:]
UpperCamelCase , UpperCamelCase = self.parse_known_args(args=__a )
UpperCamelCase = []
for dtype in self.dataclass_types:
UpperCamelCase = {f.name for f in dataclasses.fields(__a ) if f.init}
UpperCamelCase = {k: v for k, v in vars(__a ).items() if k in keys}
for k in keys:
delattr(__a , __a )
UpperCamelCase = dtype(**__a )
outputs.append(__a )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__a )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"Some specified arguments are not used by the HfArgumentParser: {remaining_args}" )
return (*outputs,)
def snake_case_ (self , __a , __a = False ) -> Tuple[DataClass, ...]:
UpperCamelCase = set(args.keys() )
UpperCamelCase = []
for dtype in self.dataclass_types:
UpperCamelCase = {f.name for f in dataclasses.fields(__a ) if f.init}
UpperCamelCase = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
UpperCamelCase = dtype(**__a )
outputs.append(__a )
if not allow_extra_keys and unused_keys:
raise ValueError(F"Some keys are not used by the HfArgumentParser: {sorted(__a )}" )
return tuple(__a )
def snake_case_ (self , __a , __a = False ) -> Tuple[DataClass, ...]:
with open(Path(__a ) , encoding="utf-8" ) as open_json_file:
UpperCamelCase = json.loads(open_json_file.read() )
UpperCamelCase = self.parse_dict(__a , allow_extra_keys=__a )
return tuple(__a )
def snake_case_ (self , __a , __a = False ) -> Tuple[DataClass, ...]:
UpperCamelCase = self.parse_dict(yaml.safe_load(Path(__a ).read_text() ) , allow_extra_keys=__a )
return tuple(__a )
| 244 | 1 |
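In training scripts, the parser above is usually fed a single dataclass. A minimal sketch; the field names below are illustrative:

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class TrainingArgs:
    model_name: str = field(default="bert-base-cased", metadata={"help": "Checkpoint to fine-tune."})
    lr: float = field(default=5e-5, metadata={"help": "Learning rate."})
    fp16: bool = field(default=False, metadata={"help": "Use mixed precision."})

parser = HfArgumentParser(TrainingArgs)
# bool fields accept a bare flag (nargs="?" with const=True, see _parse_dataclass_field above).
(args,) = parser.parse_args_into_dataclasses(args=["--lr", "3e-5", "--fp16"])
print(args.lr, args.fp16)  # 3e-05 True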
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
| 95 |
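A PipelineTool is callable end to end (encode, then forward, then decode). A hedged sketch of driving this one directly; the import path follows the upstream `transformers.tools` package and may differ across versions:

from transformers.tools import TextSummarizationTool  # import path varies by transformers version

summarizer = TextSummarizationTool()
print(summarizer(
    "Hugging Face maintains the Transformers library, which provides thousands of "
    "pretrained models for text, vision, and audio tasks."
))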
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
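
# As the comments above note, the manual truncation of the last evaluation batch
# can be delegated to `Accelerator.gather_for_metrics`, which drops the samples
# that were duplicated to pad the distributed dataloader. A minimal sketch of
# the equivalent evaluation loop:
#
#   for batch in eval_dataloader:
#       with torch.no_grad():
#           outputs = model(**batch)
#       predictions = outputs.logits.argmax(dim=-1)
#       predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#       metric.add_batch(predictions=predictions, references=references)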
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
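
# Example invocations (the file name is an assumption; see the accelerate
# examples README linked above for the exact commands):
#
#   python multi_process_metrics.py               # single CPU or GPU
#   accelerate launch multi_process_metrics.py    # distributed, after `accelerate config`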
| 95 | 1 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Tuple = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
SCREAMING_SNAKE_CASE : Tuple = False
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : List[Any] = False
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = UperNetModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ ,has_text_modality=lowercase__ ,hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase__ )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase__ )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : int ):
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE ( self : str ):
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
pass
def SCREAMING_SNAKE_CASE ( self : Any ):
def check_hidden_states_output(lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : List[str] ):
__lowercase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = self.model_tester.num_stages
self.assertEqual(len(lowercase__ ) ,expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = _config_zero_init(lowercase__ )
__lowercase = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
__lowercase = model_class(config=lowercase__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F"Parameter {name} of model {model_class} seems not properly initialized" ,)
@unittest.skip(reason='''UperNet does not have tied weights''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
pass
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = UperNetForSemanticSegmentation.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def _A ( ):
"""simple docstring"""
__lowercase = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
__lowercase = Image.open(A__ ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
__lowercase = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowercase__ )
__lowercase = prepare_img()
__lowercase = processor(images=lowercase__ ,return_tensors='''pt''' ).to(lowercase__ )
with torch.no_grad():
__lowercase = model(**lowercase__ )
__lowercase = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape ,lowercase__ )
__lowercase = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] ,lowercase__ ,atol=1e-4 ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
__lowercase = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowercase__ )
__lowercase = prepare_img()
__lowercase = processor(images=lowercase__ ,return_tensors='''pt''' ).to(lowercase__ )
with torch.no_grad():
__lowercase = model(**lowercase__ )
__lowercase = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape ,lowercase__ )
__lowercase = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] ,lowercase__ ,atol=1e-4 ) )
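
# To turn the checked logits into a per-pixel class map, an argmax over the
# label dimension is enough; with the (1, num_labels, 512, 512) output above:
#
#   segmentation_map = outputs.logits.argmax(dim=1)[0]  # (512, 512) tensor of label ids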
| 52 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
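
# A quick sanity check of the property above: with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2), the feature encoder downsamples raw audio by
# 5 * 2**6 == 320, i.e. one hidden state per 320 input samples (20 ms at 16 kHz).
#
#   config = Data2VecAudioConfig()
#   assert config.inputs_to_logits_ratio == 320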
| 52 | 1 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
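
# A quick shape sanity check for the helper above (illustrative sizes only):
# a checkpoint-version-2.0 QKV weight of shape [num_splits * heads * head_dim, hidden]
# round-trips through the permutation and keeps its outer shape.
#
#   qkv = torch.randn(3 * 16 * 64, 1024)
#   out = fix_query_key_value_ordering(qkv, 2.0, 3, 16, 64)
#   assert out.shape == qkv.shape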
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
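
# Example invocation (paths are illustrative):
#
#   python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure /path/to/megatron/checkpoint.zip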
| 125 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
def __init__( self : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : Optional[int]=13 , __magic_name__ : str=7 , __magic_name__ : Dict=True , __magic_name__ : Dict=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Tuple=99 , __magic_name__ : List[str]=32 , __magic_name__ : int=2 , __magic_name__ : List[str]=4 , __magic_name__ : Tuple=37 , __magic_name__ : Dict="gelu" , __magic_name__ : int=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Optional[int]=5_12 , __magic_name__ : Tuple=16 , __magic_name__ : Optional[int]=2 , __magic_name__ : Optional[int]=0.0_2 , __magic_name__ : Dict=3 , __magic_name__ : str=4 , __magic_name__ : Optional[Any]=None , __magic_name__ : Any=0 , ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : str = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : List[Any] = seq_length
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : Optional[Any] = use_input_mask
UpperCAmelCase_ : Tuple = use_token_type_ids
UpperCAmelCase_ : int = use_labels
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : Any = intermediate_size
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : str = type_vocab_size
UpperCAmelCase_ : List[str] = type_sequence_label_size
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : str = num_labels
UpperCAmelCase_ : Tuple = num_choices
UpperCAmelCase_ : Union[str, Any] = scope
UpperCAmelCase_ : Union[str, Any] = projection_dim
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Dict = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
UpperCAmelCase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Tuple = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : int = None
if self.use_labels:
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : Optional[Any] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
UpperCAmelCase_ : List[str] = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : str , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : Any ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = TFDPRContextEncoder(config=__magic_name__ )
UpperCAmelCase_ : Tuple = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : int = model(__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : Any = model(__magic_name__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : int , __magic_name__ : Dict , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase_ : List[str] = TFDPRQuestionEncoder(config=__magic_name__ )
UpperCAmelCase_ : Optional[int] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : Optional[int] = model(__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : List[Any] = model(__magic_name__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : int = TFDPRReader(config=__magic_name__ )
UpperCAmelCase_ : Tuple = model(__magic_name__ , attention_mask=__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__a : Any = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
__a : int = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
__a : str = False
__a : str = False
__a : Dict = False
__a : Optional[Any] = False
__a : Any = False
def UpperCAmelCase__ ( self : int ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = TFDPRModelTester(self )
UpperCAmelCase_ : Dict = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__magic_name__ )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__magic_name__ )
def UpperCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__magic_name__ )
@slow
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Union[str, Any] = TFDPRContextEncoder.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[Any] = TFDPRContextEncoder.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = TFDPRQuestionEncoder.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = TFDPRReader.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
@slow
def UpperCAmelCase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
UpperCAmelCase_ : Optional[int] = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] ) # [CLS] hello, is my dog cute? [SEP]
UpperCAmelCase_ : List[Any] = model(__magic_name__ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
UpperCAmelCase_ : List[str] = tf.constant(
[
[
0.0_3_2_3_6_2_5_3,
0.1_2_7_5_3_3_3_5,
0.1_6_8_1_8_5_0_9,
0.0_0_2_7_9_7_8_6,
0.3_8_9_6_9_3_3,
0.2_4_2_6_4_9_4_5,
0.2_1_7_8_9_7_1,
-0.0_2_3_3_5_2_2_7,
-0.0_8_4_8_1_9_5_9,
-0.1_4_3_2_4_1_1_7,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 125 | 1 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 328 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_UpperCAmelCase = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_UpperCAmelCase = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_UpperCAmelCase = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 score and Exact Match for MultiRC predictions.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 328 | 1 |
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    """
    Returns the nth ugly number (a positive integer whose only prime factors
    are 2, 3 and 5).

    >>> ugly_numbers(10)
    12
    >>> ugly_numbers(20)
    36
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
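
# The three index pointers advance independently (Dijkstra's merge method for
# Hamming numbers), so each of the n outputs is produced with O(1) extra work:
# O(n) time and O(n) space overall.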
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(200) = }""")
| 150 | """simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
snake_case = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
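
# Added commentary (not in the original script): each pair above maps a timm
# parameter name to its Hugging Face counterpart; rename_key() below simply pops
# the tensor stored under the old key and reinserts it under the new one.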
def lowerCAmelCase__ ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Tuple=False ) -> Union[str, Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
snake_case = ''
else:
snake_case = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
snake_case = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case = in_proj_weight[
: config.hidden_size, :
]
snake_case = in_proj_bias[: config.hidden_size]
snake_case = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case = in_proj_weight[
-config.hidden_size :, :
]
snake_case = in_proj_bias[-config.hidden_size :]
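
# Added commentary (not in the original script): timm stores attention as one
# fused qkv projection of shape (3 * hidden_size, hidden_size); the slices above
# split it into the separate query, key and value weights and biases that the
# Hugging Face DeiT implementation expects.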
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Any:
"""simple docstring"""
snake_case = dct.pop(_UpperCamelCase )
snake_case = val
def lowerCAmelCase__ ( ) -> Dict:
"""simple docstring"""
snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
@torch.no_grad()
def lowerCAmelCase__ ( _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
snake_case = DeiTConfig()
# all deit models have fine-tuned heads
snake_case = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
snake_case = 1_0_0_0
snake_case = 'huggingface/label-files'
snake_case = 'imagenet-1k-id2label.json'
snake_case = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='dataset' ) , 'r' ) )
snake_case = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
snake_case = int(deit_name[-6:-4] )
snake_case = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
snake_case = 1_9_2
snake_case = 7_6_8
snake_case = 1_2
snake_case = 3
elif deit_name[9:].startswith('small' ):
snake_case = 3_8_4
snake_case = 1_5_3_6
snake_case = 1_2
snake_case = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
snake_case = 1_0_2_4
snake_case = 4_0_9_6
snake_case = 2_4
snake_case = 1_6
# load original model from timm
snake_case = timm.create_model(_UpperCamelCase , pretrained=_UpperCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case = timm_model.state_dict()
snake_case = create_rename_keys(_UpperCamelCase , _UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
read_in_q_k_v(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# load HuggingFace model
snake_case = DeiTForImageClassificationWithTeacher(_UpperCamelCase ).eval()
model.load_state_dict(_UpperCamelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
snake_case = int(
(2_5_6 / 2_2_4) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
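    # Worked example (added commentary): for the default 224-pixel checkpoints this
    # evaluates to int(256 / 224 * 224) = 256, i.e. resize the short side to 256
    # and then center-crop back to the model's 224x224 input.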
snake_case = DeiTImageProcessor(size=_UpperCamelCase , crop_size=config.image_size )
snake_case = image_processor(images=prepare_img() , return_tensors='pt' )
snake_case = encoding['pixel_values']
snake_case = model(_UpperCamelCase )
snake_case = timm_model(_UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_UpperCamelCase , outputs.logits , atol=1e-3 )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
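
# Example invocation (illustrative; assumes the script is saved as
# convert_deit_timm_to_pytorch.py):
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled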
| 150 | 1 |
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase : str = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( A , unittest.TestCase ):
"""simple docstring"""
__a = AlbertTokenizer
__a = AlbertTokenizerFast
__a = True
__a = True
__a = True
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : List[str] = AlbertTokenizer(UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Dict = """this is a test"""
__UpperCAmelCase : Dict = """this is a test"""
return input_text, output_text
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Any = """<pad>"""
__UpperCAmelCase : List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(UpperCamelCase ) , 30_000 )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : List[Any] = self.get_rust_tokenizer()
__UpperCAmelCase : int = """I was born in 92000, and this is falsé."""
__UpperCAmelCase : int = tokenizer.tokenize(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : str = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
__UpperCAmelCase : Optional[int] = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer()
__UpperCAmelCase : Tuple = tokenizer.encode(UpperCamelCase )
__UpperCAmelCase : List[Any] = rust_tokenizer.encode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
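        # Added commentary (not in the original test): the three comparisons above
        # verify that the slow (SentencePiece) and fast (Rust) tokenizers agree on
        # tokenization, on encoding without special tokens, and on full encoding.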
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Dict = AlbertTokenizer(UpperCamelCase , keep_accents=UpperCamelCase )
__UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCamelCase , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [48, 25, 21, 1_289] )
__UpperCAmelCase : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
__UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(UpperCamelCase )
self.assertListEqual(UpperCamelCase , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
__UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = AlbertTokenizer(UpperCamelCase )
__UpperCAmelCase : Tuple = tokenizer.encode("""sequence builders""" )
__UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" )
__UpperCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
__UpperCAmelCase : int = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : List[str] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 320 |
"""simple docstring"""
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'


def base64_encode(data: bytes) -> bytes:
    '''simple docstring'''
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = ''.join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b'=' * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += '0' * (6 - len(binary_stream) % 6)
    else:
        padding = b''
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        ''.join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
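
# Worked example (added commentary, not from the original source): b"a" is the
# bit string 01100001; padded with four zero bits it splits into 011000 (24 -> "Y")
# and 010000 (16 -> "Q"), and two "=" signs are appended, matching
# base64.b64encode(b"a") == b"YQ==".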
def base64_decode(encoded_data: str) -> bytes:
    '''simple docstring'''
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            'argument should be a bytes-like object or ASCII string, '
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode('utf-8')
        except UnicodeDecodeError:
            raise ValueError('base64 encoded data should only contain ASCII characters')
    padding = encoded_data.count('=')
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), 'Invalid base64 character(s) found.'
    else:
        assert all(char in B64_CHARSET for char in encoded_data), 'Invalid base64 character(s) found.'
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, 'Incorrect padding'
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = ''.join(bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data)
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
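    # Round-trip sanity check (added for illustration, not from the original
    # source); the results should agree with the standard library's base64 module.
    import base64

    sample = b'Hello, World!'
    assert base64_encode(sample) == base64.b64encode(sample)
    assert base64_decode(base64_encode(sample)) == sample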
| 320 | 1 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _lowercase :
def __init__( self: List[str] , UpperCamelCase__: Dict , UpperCamelCase__: Dict=13 , UpperCamelCase__: int=30 , UpperCamelCase__: Tuple=2 , UpperCamelCase__: Any=3 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Tuple=5 , UpperCamelCase__: Any=4 , UpperCamelCase__: str=37 , UpperCamelCase__: Union[str, Any]="gelu" , UpperCamelCase__: str=0.1 , UpperCamelCase__: List[Any]=0.1 , UpperCamelCase__: Union[str, Any]=10 , UpperCamelCase__: Optional[Any]=0.02 , UpperCamelCase__: Optional[Any]=3 , UpperCamelCase__: Any=None , UpperCamelCase__: Any=2 , ):
lowerCamelCase__ : str = parent
lowerCamelCase__ : List[str] = batch_size
lowerCamelCase__ : Tuple = image_size
lowerCamelCase__ : Any = patch_size
lowerCamelCase__ : Dict = num_channels
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Dict = use_labels
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : str = intermediate_size
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : int = hidden_dropout_prob
lowerCamelCase__ : Dict = attention_probs_dropout_prob
lowerCamelCase__ : int = type_sequence_label_size
lowerCamelCase__ : Tuple = initializer_range
lowerCamelCase__ : Optional[int] = scope
lowerCamelCase__ : Tuple = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowerCamelCase__ : str = (image_size // patch_size) ** 2
lowerCamelCase__ : int = num_patches + 2
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Dict ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Optional[Any] ):
lowerCamelCase__ : Any = DeiTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ):
lowerCamelCase__ : List[str] = DeiTForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : Any = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : Optional[int] = DeiTForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : int = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Dict , UpperCamelCase__: Tuple , UpperCamelCase__: List[str] ):
lowerCamelCase__ : str = self.type_sequence_label_size
lowerCamelCase__ : Union[str, Any] = DeiTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : List[Any] = 1
lowerCamelCase__ : List[Any] = DeiTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
a = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
a = False
a = False
a = False
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : List[str] = DeiTModelTester(self )
lowerCamelCase__ : List[Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self: int ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def lowerCamelCase_ ( self: Optional[Any] ):
pass
def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase__ : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[Any] = model_class(UpperCamelCase__ )
lowerCamelCase__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
lowerCamelCase__ : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: List[str] , UpperCamelCase__: Dict=False ):
lowerCamelCase__ : List[Any] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCamelCase_ ( self: Dict ):
if not self.model_tester.is_training:
return
        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : List[str] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(UpperCamelCase__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowerCamelCase__ : Any = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowerCamelCase__ : str = model(**UpperCamelCase__ ).loss
loss.backward()
def lowerCamelCase_ ( self: Optional[Any] ):
        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Any = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCamelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
model.gradient_checkpointing_enable()
model.to(UpperCamelCase__ )
model.train()
lowerCamelCase__ : Optional[int] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowerCamelCase__ : int = model(**UpperCamelCase__ ).loss
loss.backward()
def lowerCamelCase_ ( self: Optional[int] ):
        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : str = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(UpperCamelCase__ ),
*get_values(UpperCamelCase__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
lowerCamelCase__ : int = problem_type["""title"""]
lowerCamelCase__ : Any = problem_type["""num_labels"""]
lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowerCamelCase__ : Dict = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if problem_type["num_labels"] > 1:
lowerCamelCase__ : List[Any] = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
lowerCamelCase__ : List[Any] = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=UpperCamelCase__ ) as warning_list:
lowerCamelCase__ : List[Any] = model(**UpperCamelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def lowerCamelCase_ ( self: Union[str, Any] ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Optional[Any] = DeiTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> List[str]:
lowerCamelCase__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: List[Any] ):
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
UpperCamelCase__ )
lowerCamelCase__ : Dict = self.default_image_processor
lowerCamelCase__ : Optional[int] = prepare_img()
lowerCamelCase__ : str = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : List[str] = model(**UpperCamelCase__ )
# verify the logits
lowerCamelCase__ : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase__ : Any = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Optional[Any] = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
lowerCamelCase__ : List[Any] = self.default_image_processor
lowerCamelCase__ : List[str] = prepare_img()
lowerCamelCase__ : Tuple = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
lowerCamelCase__ : Optional[Any] = inputs.pixel_values.to(UpperCamelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowerCamelCase__ : int = model(UpperCamelCase__ )
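        # Added commentary (not in the original test): loading the checkpoint in half
        # precision with device_map="auto" exercises Accelerate's automatic weight
        # placement; the forward pass above only needs to complete without error.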
| 41 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = ["input_features", "is_longer"]
def __init__( self : str , A : int=64 , A : Dict=48000 , A : str=480 , A : List[Any]=10 , A : Optional[Any]=1024 , A : Tuple=0.0 , A : List[Any]=False , A : float = 0 , A : float = 14000 , A : int = None , A : str = "fusion" , A : str = "repeatpad" , **A : Dict , ):
super().__init__(
feature_size=A , sampling_rate=A , padding_value=A , return_attention_mask=A , **A , )
_UpperCAmelCase : Optional[Any] = top_db
_UpperCAmelCase : Dict = truncation
_UpperCAmelCase : List[Any] = padding
_UpperCAmelCase : Optional[Any] = fft_window_size
_UpperCAmelCase : Dict = (fft_window_size >> 1) + 1
_UpperCAmelCase : Any = hop_length
_UpperCAmelCase : Tuple = max_length_s
_UpperCAmelCase : str = max_length_s * sampling_rate
_UpperCAmelCase : Any = sampling_rate
_UpperCAmelCase : Optional[int] = frequency_min
_UpperCAmelCase : str = frequency_max
_UpperCAmelCase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=A , min_frequency=A , max_frequency=A , sampling_rate=A , norm=A , mel_scale="htk" , )
_UpperCAmelCase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=A , min_frequency=A , max_frequency=A , sampling_rate=A , norm="slaney" , mel_scale="slaney" , )
def _A ( self : List[str] ):
_UpperCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : Dict = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _A ( self : Optional[Any] , A : np.array , A : Optional[np.array] = None ):
_UpperCAmelCase : Dict = spectrogram(
A , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=A , log_mel="dB" , )
return log_mel_spectrogram.T
def _A ( self : str , A : str , A : List[str] , A : List[Any] ):
_UpperCAmelCase : List[str] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_UpperCAmelCase : Optional[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_UpperCAmelCase : Tuple = [0]
# randomly choose index for each part
_UpperCAmelCase : Dict = np.random.choice(ranges[0] )
_UpperCAmelCase : str = np.random.choice(ranges[1] )
_UpperCAmelCase : Tuple = np.random.choice(ranges[2] )
_UpperCAmelCase : str = mel[idx_front : idx_front + chunk_frames, :]
_UpperCAmelCase : str = mel[idx_middle : idx_middle + chunk_frames, :]
_UpperCAmelCase : List[Any] = mel[idx_back : idx_back + chunk_frames, :]
_UpperCAmelCase : Dict = torch.tensor(mel[None, None, :] )
_UpperCAmelCase : Optional[Any] = torch.nn.functional.interpolate(
A , size=[chunk_frames, 64] , mode="bilinear" , align_corners=A )
_UpperCAmelCase : List[str] = mel_shrink[0][0].numpy()
_UpperCAmelCase : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
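    # Added commentary (not in the original): the fusion input stacks a bilinearly
    # shrunken global mel spectrogram with three randomly positioned chunks taken
    # from the front, middle and back thirds, giving a 4-channel view of long audio.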
def _A ( self : List[Any] , A : np.array , A : List[str] , A : Any , A : Optional[int] ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_UpperCAmelCase : int = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_UpperCAmelCase : str = len(A ) - max_length
_UpperCAmelCase : str = np.random.randint(0 , overflow + 1 )
_UpperCAmelCase : int = waveform[idx : idx + max_length]
_UpperCAmelCase : Any = self._np_extract_fbank_features(A , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_UpperCAmelCase : Tuple = self._np_extract_fbank_features(A , self.mel_filters )
_UpperCAmelCase : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_UpperCAmelCase : Optional[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_UpperCAmelCase : Any = np.stack([mel, mel, mel, mel] , axis=0 )
_UpperCAmelCase : int = False
else:
_UpperCAmelCase : Tuple = self._random_mel_fusion(A , A , A )
_UpperCAmelCase : Any = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
_UpperCAmelCase : Optional[Any] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_UpperCAmelCase : str = int(max_length / len(A ) )
_UpperCAmelCase : Dict = np.stack(np.tile(A , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_UpperCAmelCase : Dict = int(max_length / len(A ) )
_UpperCAmelCase : List[str] = np.stack(np.tile(A , A ) )
_UpperCAmelCase : Optional[Any] = np.pad(A , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
_UpperCAmelCase : str = self._np_extract_fbank_features(A , self.mel_filters )
_UpperCAmelCase : Optional[int] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_UpperCAmelCase : List[str] = self._np_extract_fbank_features(A , self.mel_filters_slaney )[None, :]
return input_mel, longer
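    # Added commentary (not in the original): for long audio, "rand_trunc" crops a
    # random window while "fusion" builds the 4-channel stack above; short audio is
    # tiled ("repeat"), or tiled and zero-padded ("repeatpad"), up to max_length.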
def __call__( self : Union[str, Any] , A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , A : str = None , A : Optional[str] = None , A : Optional[int] = None , A : Optional[int] = None , A : Optional[Union[str, TensorType]] = None , **A : List[str] , ):
_UpperCAmelCase : int = truncation if truncation is not None else self.truncation
_UpperCAmelCase : Optional[int] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_UpperCAmelCase : Any = isinstance(A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_UpperCAmelCase : Optional[Any] = is_batched_numpy or (
isinstance(A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase : int = [np.asarray(A , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(A , np.ndarray ):
_UpperCAmelCase : List[str] = np.asarray(A , dtype=np.floataa )
elif isinstance(A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase : List[str] = [np.asarray(A )]
# convert to mel spectrogram, truncate and pad if needed.
_UpperCAmelCase : Dict = [
self._get_input_mel(A , max_length if max_length else self.nb_max_samples , A , A )
for waveform in raw_speech
]
_UpperCAmelCase : int = []
_UpperCAmelCase : Optional[Any] = []
for mel, longer in padded_inputs:
input_mel.append(A )
is_longer.append(A )
if truncation == "fusion" and sum(A ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_UpperCAmelCase : Union[str, Any] = np.random.randint(0 , len(A ) )
_UpperCAmelCase : Optional[Any] = True
if isinstance(input_mel[0] , A ):
_UpperCAmelCase : List[str] = [np.asarray(A , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_UpperCAmelCase : Tuple = [[longer] for longer in is_longer]
_UpperCAmelCase : Optional[Any] = {"input_features": input_mel, "is_longer": is_longer}
_UpperCAmelCase : Tuple = BatchFeature(A )
if return_tensors is not None:
_UpperCAmelCase : List[Any] = input_features.convert_to_tensors(A )
return input_features
| 31 | 0 |
def multiplicative_persistence(num: int) -> int:
    '''simple docstring'''
    if not isinstance(num, int):
        raise ValueError("""multiplicative_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""multiplicative_persistence() does not accept negative values""")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    '''simple docstring'''
    if not isinstance(num, int):
        raise ValueError("""additive_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""additive_persistence() does not accept negative values""")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
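
# Worked example (added for illustration, not from the original source):
# 77 -> 7*7 = 49 -> 4*9 = 36 -> 3*6 = 18 -> 1*8 = 8 takes 4 multiplication steps,
# and 199 -> 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1 takes 3 addition steps.
assert multiplicative_persistence(77) == 4
assert additive_persistence(199) == 3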
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 34 |
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCamelCase_ = logging.getLogger(__name__)
class __lowerCamelCase ( __snake_case ):
lowerCamelCase_ : Optional[int] = 'masked_bert'
def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1e-12 , lowerCamelCase=0 , lowerCamelCase="topK" , lowerCamelCase="constant" , lowerCamelCase=0.0 , **lowerCamelCase , ) -> List[str]:
super().__init__(pad_token_id=lowerCamelCase , **lowerCamelCase )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_act
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = pruning_method
snake_case_ = mask_init
        snake_case_ = mask_scale
| 34 | 1 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input('Enter image url: ').strip()
    print(F'Downloading image from {url} ...')
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(F'Done. Image saved to disk as {file_name}.')
| 29 |
"""simple docstring"""
def lowercase ( density : float , bulk_modulus : float ) -> float:
    if density <= 0:
        raise ValueError('''Impossible fluid density''' )
    if bulk_modulus <= 0:
        raise ValueError('''Impossible bulk modulus''' )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    import doctest

    doctest.testmod()
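    # Example (added for illustration; the water values are my own, not from the
    # original source): a bulk modulus of ~2.15e9 Pa and a density of 1000 kg/m^3
    # give roughly (2.15e9 / 1000) ** 0.5 = 1466 m/s, close to the measured speed
    # of sound in water.
    print(lowercase(1000, 2.15e9))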
| 256 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : Any = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase : int = {
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
lowerCamelCase : Tuple = {'mobilebert-uncased': 5_1_2}
lowerCamelCase : Tuple = {}
class __lowercase (_UpperCAmelCase ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_INIT_CONFIGURATION
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = MobileBertTokenizer
def __init__( self , A=None , A=None , A=True , A="[UNK]" , A="[SEP]" , A="[PAD]" , A="[CLS]" , A="[MASK]" , A=True , A=None , **A , ) -> List[str]:
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , **_UpperCAmelCase , )
snake_case : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _UpperCAmelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _UpperCAmelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _UpperCAmelCase ) != tokenize_chinese_chars
):
snake_case : Optional[Any] = getattr(_UpperCAmelCase , normalizer_state.pop("""type""" ) )
snake_case : Any = do_lower_case
snake_case : List[Any] = strip_accents
snake_case : List[Any] = tokenize_chinese_chars
snake_case : Tuple = normalizer_class(**_UpperCAmelCase )
snake_case : Dict = do_lower_case
def UpperCAmelCase ( self , A , A=None ) -> Optional[int]:
snake_case : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase ( self , A , A = None ) -> int:
snake_case : Union[str, Any] = [self.sep_token_id]
snake_case : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase ( self , A , A = None ) -> str:
snake_case : Optional[Any] = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
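
# Minimal usage sketch (added for illustration; "google/mobilebert-uncased" is the
# checkpoint named in the URL maps above, and the class above corresponds to
# transformers' MobileBertTokenizerFast):
#   tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   tok("hello world")["input_ids"]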
| 352 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : Optional[Any] = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Any = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowerCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 176 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
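
# Added commentary (not in the original file): _LazyModule defers the heavy
# modeling imports registered above until an attribute is first accessed, so
# importing the package stays fast even though the module exposes torch-backed
# classes.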
| 26 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def lowerCAmelCase_ ( snake_case_ ):
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_A : str = k.replace(snake_case_,snake_case_ )
if k.startswith("""encoder""" ):
_A : Optional[Any] = k.replace(""".attn""",""".self_attn""" )
_A : Dict = k.replace("""norm1""","""self_attn_layer_norm""" )
_A : Optional[Any] = k.replace("""norm2""","""final_layer_norm""" )
elif k.startswith("""decoder""" ):
_A : str = k.replace("""norm1""","""self_attn_layer_norm""" )
_A : Any = k.replace("""norm2""","""encoder_attn_layer_norm""" )
_A : Optional[int] = k.replace("""norm3""","""final_layer_norm""" )
return k
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
_A : str = sd.pop(snake_case_ )
_A : Optional[int] = k.replace("""layernorm_embedding""","""layer_norm""" )
assert new_k not in sd
_A : Optional[int] = v
_snake_case = ["START"]
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Tuple = torch.load(snake_case_,map_location="""cpu""" )
_A : List[Any] = model["""model"""]
_A : Optional[Any] = BlenderbotConfig.from_json_file(snake_case_ )
_A : List[str] = BlenderbotForConditionalGeneration(snake_case_ )
_A : Tuple = m.model.state_dict().keys()
_A : Any = []
_A : Dict = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_A : Optional[int] = rename_state_dict_key(snake_case_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_A : Dict = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(snake_case_ )
m.model.load_state_dict(snake_case_,strict=snake_case_ )
m.half()
m.save_pretrained(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
_snake_case = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
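
# Example invocation (illustrative; assumes the script is saved under its
# transformers name, convert_blenderbot_original_checkpoint_to_pytorch.py):
#   python convert_blenderbot_original_checkpoint_to_pytorch.py \
#       --src_path blenderbot-model.bin --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json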
| 26 | 1 |
import math
class Graph :
    """simple docstring"""
    def __init__( self , n : int = 0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n)] for i in range(0 , n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n)] for i in range(0 , n)
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u : int , v : int , w : int):
        self.w[u][v] = w
        self.dp[u][v] = w
    def floyd_warshall( self):
        for k in range(0 , self.n):
            for i in range(0 , self.n):
                for j in range(0 , self.n):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j])
    def show_min( self , u : int , v : int):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
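    # Expected results (added for illustration, not from the original source):
    # the shortest 1 -> 4 path is 1 -> 3 -> 4 with weight 5 + 6 = 11, and the
    # shortest 0 -> 3 path is 0 -> 2 -> 3 with weight 9 + 7 = 16.
    assert graph.show_min(1, 4) == 11
    assert graph.show_min(0, 3) == 16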
| 127 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)

    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
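# Example invocation (TPU name and zone are placeholder values, not from the source):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a --command "echo hello" --debug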
| 127 | 1 |
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
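# Hand-checked examples (computed by hand, not from the source):
# max_subsequence_sum([1, 2, 3, 4, -2]) == 10
# max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  (classic Kadane case)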
| 338 | def selection_sort(collection):
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
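# Hand-checked example (not from the source):
# selection_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]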
| 338 | 1 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha**0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
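# Minimal usage sketch (illustrative only; `model_output`, `sample` and `generator`
# are assumed to come from the surrounding denoising pipeline):
#   scheduler = UnCLIPScheduler(variance_type="fixed_small_log")
#   scheduler.set_timesteps(25)
#   for t in scheduler.timesteps:
#       sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample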
| 279 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
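# Minimal usage sketch (hypothetical channel sizes; like any flax.linen module, a
# block must still be initialised with `init(rng, ...)` before `apply` produces output):
#   block = FlaxDownBlock2D(in_channels=32, out_channels=64, num_layers=2)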
| 279 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of ADE20k
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)) | 8 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
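# Example invocation (script name and checkpoint paths are placeholders):
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoint.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-qa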
| 198 | 0 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 350 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)

    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
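# Hand-checked example (computed by hand, not from the source):
# frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0
# (take the first two items whole, then 20/30 of the third: 60 + 100 + 80 = 240)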
| 65 | 0 |
import argparse
import hashlib
import io
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url: str, root: str) -> bytes:
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes


def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # NOTE: this cache directory is an assumption; the original snippet called
        # _download without passing a download root.
        root = os.path.join(os.path.expanduser("~"), ".cache", "whisper")
        model_bytes = _download(_MODELS[checkpoint_path], root)
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],  # was n_text_state, which is a hidden size, not a head count
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
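# Example invocation (script name and output path are placeholders; "tiny" is one of
# the _MODELS keys defined above):
#   python convert_whisper.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny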
| 100 |
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
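# Hand-checked examples (not from the source):
# parse_roman_numerals("XLIX") == 49 and generate_roman_numerals(49) == "XLIX"
# (solution() itself needs the Project Euler data file p089_roman.txt next to this script.)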
| 339 | 0 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 370 |
import functools
def mincost_tickets(days, costs):
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
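# Hand-checked example (the classic LeetCode 983 case, computed by hand):
# mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11
# (a 1-day pass on day 1, a 7-day pass covering days 4-8, and a 1-day pass on day 20)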
| 176 | 0 |
import argparse
import random

import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler

from transformers import GPT2LMHeadModel


def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()


def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner


def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model


def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
parser.add_argument("""--trim""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=lowerCAmelCase__ , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=lowerCAmelCase__ , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
    # Collecting n (X, IG(X)) pairs of contexts and their information gain for training the secondary learner
generate_n_pairs(
context_len=3_2 , max_steps=1_0 , size_objective_set=1_0_0 , min_len=1_0_2_6 , trim=lowerCAmelCase__ , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
# Load train data for secondary learner
lowercase = joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
lowercase = training_secondary_learner(
lowerCAmelCase__ , secondary_learner_max_epochs=1_5 , secondary_learner_batch_size=1_2_8 , eval_freq=1_0_0 , igf_model_path="""igf_model.pt""" , )
# load pretrained gpt2 model
lowercase = GPTaLMHeadModel.from_pretrained("""gpt2""" )
set_seed(4_2 )
# Generate train and test data to train and evaluate gpt2 model
lowercase = generate_datasets(
context_len=3_2 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=1_0_0 , min_len=1_0_2_6 , trim=lowerCAmelCase__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , context_len=3_2 , max_steps=1_0_0_0 , batch_size=1_6 , threshold=1.0 , recopy_model=lowerCAmelCase__ , secondary_learner=lowerCAmelCase__ , eval_interval=1_0 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main()
| 197 |
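A minimal sketch of the information-gain filter driving the loop above, under the stated assumption that selectivity decays linearly from one standard deviation above the running mean to one below over ten batches (the script itself applies a simpler step change at step 10); all names below are illustrative, not from the script:

import statistics

def passes_igf_filter(predicted_q, history, num_batches):
    # keep a context for backprop only if its predicted IG(X) clears
    # a threshold that loosens as training progresses
    mean = statistics.mean(history)
    std = statistics.stdev(history) if len(history) > 1 else 0.0
    sigma = 1.0 - 0.2 * min(num_batches, 10)  # +1 sigma at batch 0, -1 sigma at batch 10
    return predicted_q >= mean + sigma * std

scores = [0.2, 0.5, 0.9, 0.4, 0.7]
print(passes_igf_filter(0.8, scores, 0))   # False: 0.8 < mean + 1*std (~0.81)
print(passes_igf_filter(0.8, scores, 10))  # True: threshold has decayed to mean - 1*std (~0.27)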
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase__ = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
return (abs(source - target ) / target) < 0.0_1
@pytest.mark.integration
def a__ ( lowerCAmelCase__ ) -> Optional[int]:
UpperCAmelCase__ : str = _TestCommandArgs(dataset=lowerCAmelCase__ , all_configs=lowerCAmelCase__ , save_infos=lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = TestCommand(*lowerCAmelCase__ )
test_command.run()
UpperCAmelCase__ : List[Any] = os.path.join(lowerCAmelCase__ , '''README.md''' )
assert os.path.exists(lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = DatasetInfosDict.from_directory(lowerCAmelCase__ )
UpperCAmelCase__ : Any = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_35_15_63,
'''num_examples''': 1_00_00,
},
{
'''name''': '''validation''',
'''num_bytes''': 23_84_18,
'''num_examples''': 10_00,
},
] , download_size=3_94_06_80 , dataset_size=2_58_99_81 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
UpperCAmelCase__ , UpperCAmelCase__ : str = getattr(dataset_infos['''default'''] , lowerCAmelCase__ ), getattr(expected_dataset_infos['''default'''] , lowerCAmelCase__ )
if key == "num_bytes":
assert is_apercent_close(lowerCAmelCase__ , lowerCAmelCase__ )
elif key == "splits":
assert list(lowerCAmelCase__ ) == list(lowerCAmelCase__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 181 | 0 |
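As a hedged aside (illustrative only, not part of the test above), the byte-count assertion amounts to a one-percent relative-tolerance comparison, which the standard library expresses directly; note that `math.isclose` divides by the larger magnitude rather than by the target alone:

import math

def within_one_percent(source, target):
    # True when the relative difference is under 1%
    return math.isclose(source, target, rel_tol=0.01)

print(within_one_percent(2_351_563, 2_360_000))  # True: about 0.36% apart
print(within_one_percent(2_351_563, 2_500_000))  # False: about 5.9% apart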
'''simple docstring'''
def lowerCamelCase__ ( _A , _A ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
a : str = (boundary[1] - boundary[0]) / steps
a : Dict = boundary[0]
a : List[str] = boundary[1]
a : int = make_points(_A , _A , _A )
a : Any = 0.0
y += (h / 2.0) * f(_A )
for i in x_i:
# print(i)
y += h * f(_A )
y += (h / 2.0) * f(_A )
return y
def lowerCamelCase__ ( _A , _A , _A ):
a : List[str] = a + h
while x < (b - h):
yield x
a : int = x + h
def lowerCamelCase__ ( _A ): # enter your function here
a : Tuple = (x - 0) * (x - 0)
return y
def lowerCamelCase__ ( ):
a : List[Any] = 0.0 # Lower bound of integration
a : List[str] = 1.0 # Upper bound of integration
a : Optional[int] = 10.0 # define number of steps or resolution
a : Union[str, Any] = [a, b] # define boundary of integration
a : List[str] = method_a(_A , _A )
print(f"""y = {y}""" )
if __name__ == "__main__":
    main()
| 364 |
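A quick worked check of the rule (plain arithmetic, not from the source): for f(x) = x**2 on [0, 1] with 10 steps the composite trapezoidal sum is 0.335 against the exact integral 1/3, and the error shrinks as O(h**2):

h = 0.1
xs = [i * h for i in range(11)]
approx = h / 2 * (xs[0] ** 2 + 2 * sum(x ** 2 for x in xs[1:-1]) + xs[-1] ** 2)
print(approx)  # ~0.335, versus 1/3 ~ 0.3333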
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
lowerCAmelCase: Dict = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
lowerCAmelCase: Optional[Any] = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
lowerCAmelCase: List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def lowerCamelCase__ ( _A ):
def remove_articles(_A ):
a : Union[str, Any] = re.compile(r'\b(a|an|the)\b' , re.UNICODE )
return re.sub(_A , ' ' , _A )
def white_space_fix(_A ):
return " ".join(text.split() )
def remove_punc(_A ):
a : Tuple = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_A ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_A ) ) ) )
def lowerCamelCase__ ( _A , _A ):
return int(normalize_answer(_A ) == normalize_answer(_A ) )
def lowerCamelCase__ ( _A , _A ):
a : List[Any] = [any(compute_exact(_A , _A ) for ref in refs ) for pred, refs in zip(_A , _A )]
return (sum(_A ) / len(_A )) * 100
def lowerCamelCase__ ( _A , _A , _A , _A ):
a : List[Any] = [rgram for rgrams in rgramslist for rgram in rgrams]
a : Any = Counter(_A )
a : Dict = Counter(_A )
a : Tuple = Counter()
for sgram, scount in sgramcounter.items():
a : List[str] = scount * numref
a : Optional[int] = Counter(_A )
a : Optional[int] = Counter()
for cgram, ccount in cgramcounter.items():
a : List[str] = ccount * numref
# KEEP
a : Optional[Any] = sgramcounter_rep & cgramcounter_rep
a : Union[str, Any] = keepgramcounter_rep & rgramcounter
a : Any = sgramcounter_rep & rgramcounter
a : str = 0
a : Optional[Any] = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
a : Tuple = 1
a : Any = 1
if len(_A ) > 0:
a : Optional[int] = keeptmpscorea / len(_A )
if len(_A ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
a : Tuple = keeptmpscorea / sum(keepgramcounterall_rep.values() )
a : List[str] = 0
if keepscore_precision > 0 or keepscore_recall > 0:
a : Dict = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
a : List[Any] = sgramcounter_rep - cgramcounter_rep
a : Any = delgramcounter_rep - rgramcounter
a : Union[str, Any] = sgramcounter_rep - rgramcounter
a : Tuple = 0
a : str = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
a : str = 1
if len(_A ) > 0:
a : Tuple = deltmpscorea / len(_A )
# ADDITION
a : Any = set(_A ) - set(_A )
a : Optional[int] = set(_A ) & set(_A )
a : Union[str, Any] = set(_A ) - set(_A )
a : List[str] = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
a : Tuple = 1
a : Optional[Any] = 1
if len(_A ) > 0:
a : str = addtmpscore / len(_A )
if len(_A ) > 0:
a : Optional[Any] = addtmpscore / len(_A )
a : str = 0
if addscore_precision > 0 or addscore_recall > 0:
a : List[str] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def lowerCamelCase__ ( _A , _A , _A ):
a : List[str] = len(_A )
a : List[str] = ssent.split(' ' )
a : int = csent.split(' ' )
a : Optional[Any] = []
a : Tuple = []
a : Optional[Any] = []
a : Optional[Any] = []
a : Union[str, Any] = []
a : str = []
a : Dict = []
a : Dict = []
a : str = []
a : Any = []
for rsent in rsents:
a : List[Any] = rsent.split(' ' )
a : Dict = []
a : Optional[Any] = []
a : Optional[Any] = []
ragramslist.append(_A )
for i in range(0 , len(_A ) - 1 ):
if i < len(_A ) - 1:
a : List[str] = ragrams[i] + ' ' + ragrams[i + 1]
ragrams.append(_A )
if i < len(_A ) - 2:
a : Union[str, Any] = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2]
ragrams.append(_A )
if i < len(_A ) - 3:
a : Tuple = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2] + ' ' + ragrams[i + 3]
ragrams.append(_A )
ragramslist.append(_A )
ragramslist.append(_A )
ragramslist.append(_A )
for i in range(0 , len(_A ) - 1 ):
if i < len(_A ) - 1:
a : Tuple = sagrams[i] + ' ' + sagrams[i + 1]
sagrams.append(_A )
if i < len(_A ) - 2:
a : Union[str, Any] = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2]
sagrams.append(_A )
if i < len(_A ) - 3:
a : List[str] = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2] + ' ' + sagrams[i + 3]
sagrams.append(_A )
for i in range(0 , len(_A ) - 1 ):
if i < len(_A ) - 1:
a : Any = cagrams[i] + ' ' + cagrams[i + 1]
cagrams.append(_A )
if i < len(_A ) - 2:
a : Optional[Any] = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2]
cagrams.append(_A )
if i < len(_A ) - 3:
a : Optional[Any] = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2] + ' ' + cagrams[i + 3]
cagrams.append(_A )
((a) , (a) , (a)) : int = SARIngram(_A , _A , _A , _A )
((a) , (a) , (a)) : Optional[int] = SARIngram(_A , _A , _A , _A )
((a) , (a) , (a)) : Union[str, Any] = SARIngram(_A , _A , _A , _A )
((a) , (a) , (a)) : int = SARIngram(_A , _A , _A , _A )
a : Dict = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
a : Any = sum([delascore, delascore, delascore, delascore] ) / 4
a : Union[str, Any] = sum([addascore, addascore, addascore, addascore] ) / 4
a : str = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def lowerCamelCase__ ( _A , _A = True , _A = "13a" , _A = True ):
    # Normalization is required for the ASSET dataset (one of the primary
# datasets in sentence simplification) to allow using space
# to split the sentence. Even though Wiki-Auto and TURK datasets,
# do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
a : Dict = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
a : List[str] = sacrebleu.metrics.bleu._get_tokenizer(_A )()(_A )
else:
a : str = sacrebleu.TOKENIZERS[tokenizer]()(_A )
elif tokenizer == "moses":
a : List[Any] = sacremoses.MosesTokenizer().tokenize(_A , return_str=_A , escape=_A )
elif tokenizer == "penn":
a : Tuple = sacremoses.MosesTokenizer().penn_tokenize(_A , return_str=_A )
else:
a : List[Any] = sentence
if not return_str:
a : Optional[Any] = normalized_sent.split()
return normalized_sent
def lowerCamelCase__ ( _A , _A , _A ):
if not (len(_A ) == len(_A ) == len(_A )):
raise ValueError('Sources length must match predictions and references lengths.' )
a : Tuple = 0
for src, pred, refs in zip(_A , _A , _A ):
sari_score += SARIsent(normalize(_A ) , normalize(_A ) , [normalize(_A ) for sent in refs] )
a : Tuple = sari_score / len(_A )
return 100 * sari_score
def lowerCamelCase__ ( _A , _A , _A="exp" , _A=None , _A=False , _A=False , _A=False , ):
a : Optional[int] = len(references[0] )
if any(len(_A ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
a : List[Any] = [[refs[i] for refs in references] for i in range(_A )]
a : Optional[Any] = sacrebleu.corpus_bleu(
_A , _A , smooth_method=_A , smooth_value=_A , force=_A , lowercase=_A , use_effective_order=_A , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__( datasets.Metric ):
def lowercase_ ( self : Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=[
'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
'https://github.com/cocoxu/simplification/blob/master/SARI.py',
'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
'https://github.com/mjpost/sacreBLEU',
] , reference_urls=[
'https://www.aclweb.org/anthology/Q16-1029.pdf',
'https://github.com/mjpost/sacreBLEU',
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def lowercase_ ( self : str , __snake_case : Optional[Any] , __snake_case : int , __snake_case : str ):
a : int = {}
result.update({'sari': compute_sari(sources=__snake_case , predictions=__snake_case , references=__snake_case )} )
result.update({'sacrebleu': compute_sacrebleu(predictions=__snake_case , references=__snake_case )} )
result.update({'exact': compute_em(predictions=__snake_case , references=__snake_case )} )
        return result
| 96 | 0 |
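A hedged usage sketch of the exact-match normalization above (a standalone re-implementation, names assumed): lowercasing, stripping punctuation, dropping articles, and collapsing whitespace make superficially different strings compare equal:

import re
import string

def _norm(text):
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())

print(_norm("The cat, a friend."))                         # 'cat friend'
print(_norm("The cat, a friend.") == _norm("cat friend"))  # True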
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 113 |
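The `_LazyModule` wiring above defers heavy imports until an attribute is first touched; below is a minimal sketch of the same idea using PEP 562's module-level `__getattr__` (the mapping is an illustrative stand-in, not the real import structure):

import importlib
import sys

_LAZY = {"sqrt": "math", "dumps": "json"}  # attribute -> providing module

def __getattr__(name):  # called only when normal module lookup fails
    if name in _LAZY:
        return getattr(importlib.import_module(_LAZY[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

print(sys.modules[__name__].sqrt(2.0))  # math is imported only here: 1.4142...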
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Any ) -> Dict:
A_ : Optional[Any] = nn.functional.normalize(_lowerCAmelCase )
A_ : List[str] = nn.functional.normalize(_lowerCAmelCase )
return torch.mm(_lowerCAmelCase , normalized_text_embeds.t() )
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = CLIPConfig
__UpperCamelCase = ['''CLIPEncoderLayer''']
def __init__( self :int , snake_case :CLIPConfig ):
'''simple docstring'''
super().__init__(snake_case )
A_ : int = CLIPVisionModel(config.vision_config )
A_ : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=snake_case )
A_ : Tuple = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=snake_case )
A_ : str = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=snake_case )
A_ : List[str] = nn.Parameter(torch.ones(17 ) , requires_grad=snake_case )
A_ : int = nn.Parameter(torch.ones(3 ) , requires_grad=snake_case )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Dict , snake_case :Any ):
'''simple docstring'''
A_ : List[Any] = self.vision_model(snake_case )[1] # pooled_output
A_ : List[Any] = self.visual_projection(snake_case )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Optional[Any] = cosine_distance(snake_case , self.special_care_embeds ).cpu().float().numpy()
A_ : Tuple = cosine_distance(snake_case , self.concept_embeds ).cpu().float().numpy()
A_ : Union[str, Any] = []
A_ : Any = image_embeds.shape[0]
for i in range(snake_case ):
A_ : Optional[int] = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A_ : Optional[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
A_ : Optional[Any] = special_cos_dist[i][concept_idx]
A_ : Tuple = self.special_care_embeds_weights[concept_idx].item()
A_ : Union[str, Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} )
A_ : Any = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
A_ : Tuple = cos_dist[i][concept_idx]
A_ : Tuple = self.concept_embeds_weights[concept_idx].item()
A_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(snake_case )
result.append(snake_case )
A_ : Any = [len(res["bad_concepts"] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor ):
'''simple docstring'''
A_ : List[str] = self.vision_model(snake_case )[1] # pooled_output
A_ : int = self.visual_projection(snake_case )
A_ : Tuple = cosine_distance(snake_case , self.special_care_embeds )
A_ : Tuple = cosine_distance(snake_case , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A_ : Optional[Any] = 0.0
A_ : Tuple = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
A_ : Optional[Any] = torch.any(special_scores > 0 , dim=1 )
A_ : Optional[Any] = special_care * 0.01
A_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
A_ : Union[str, Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
A_ : Union[str, Any] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 300 | 0 |
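A hedged numeric check (not from the source) that `cosine_distance` above is a plain dot product of L2-normalized rows, so parallel embeddings score 1.0 and orthogonal ones 0.0:

import torch
import torch.nn as nn

a = nn.functional.normalize(torch.tensor([[3.0, 4.0]]))
b = nn.functional.normalize(torch.tensor([[6.0, 8.0], [-4.0, 3.0]]))
print(torch.mm(a, b.t()))  # ~ tensor([[1., 0.]])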
from collections.abc import Sequence
from queue import Queue
class __lowercase :
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int=None , lowerCAmelCase__ : List[str]=None):
SCREAMING_SNAKE_CASE_: Dict = start
SCREAMING_SNAKE_CASE_: Any = end
SCREAMING_SNAKE_CASE_: Union[str, Any] = val
SCREAMING_SNAKE_CASE_: str = (start + end) // 2
SCREAMING_SNAKE_CASE_: Optional[int] = left
SCREAMING_SNAKE_CASE_: Optional[Any] = right
def __repr__( self : int):
return F"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class __lowercase :
"""simple docstring"""
def __init__( self : int , lowerCAmelCase__ : Sequence , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Optional[int] = collection
SCREAMING_SNAKE_CASE_: int = function
if self.collection:
SCREAMING_SNAKE_CASE_: Optional[Any] = self._build_tree(0 , len(lowerCAmelCase__) - 1)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any]):
self._update_tree(self.root , lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str):
return self._query_range(self.root , lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int]):
if start == end:
return SegmentTreeNode(lowerCAmelCase__ , lowerCAmelCase__ , self.collection[start])
SCREAMING_SNAKE_CASE_: int = (start + end) // 2
SCREAMING_SNAKE_CASE_: int = self._build_tree(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = self._build_tree(mid + 1 , lowerCAmelCase__)
return SegmentTreeNode(lowerCAmelCase__ , lowerCAmelCase__ , self.fn(left.val , right.val) , lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int):
if node.start == i and node.end == i:
SCREAMING_SNAKE_CASE_: Optional[int] = val
return
if i <= node.mid:
self._update_tree(node.left , lowerCAmelCase__ , lowerCAmelCase__)
else:
self._update_tree(node.right , lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = self.fn(node.left.val , node.right.val)
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any):
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , lowerCAmelCase__ , lowerCAmelCase__)
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , lowerCAmelCase__ , node.mid) , self._query_range(node.right , node.mid + 1 , lowerCAmelCase__) , )
else:
# range in right child tree
return self._query_range(node.right , lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : int):
if self.root is not None:
SCREAMING_SNAKE_CASE_: int = Queue()
queue.put(self.root)
while not queue.empty():
SCREAMING_SNAKE_CASE_: int = queue.get()
yield node
if node.left is not None:
queue.put(node.left)
if node.right is not None:
queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
lowerCAmelCase : Optional[Any] = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 127 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Dict = {"""vocab_file""": """vocab.txt"""}
lowerCAmelCase : List[str] = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
lowerCAmelCase : List[Any] = {
"""YituTech/conv-bert-base""": 512,
"""YituTech/conv-bert-medium-small""": 512,
"""YituTech/conv-bert-small""": 512,
}
lowerCAmelCase : Tuple = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : str = VOCAB_FILES_NAMES
_UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Dict = ConvBertTokenizer
def __init__( self : Optional[Any] , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Any="[UNK]" , lowerCAmelCase__ : Optional[Any]="[SEP]" , lowerCAmelCase__ : Any="[PAD]" , lowerCAmelCase__ : Dict="[CLS]" , lowerCAmelCase__ : Dict="[MASK]" , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Dict , ):
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , lowerCAmelCase__) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase__) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase__) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE_: Optional[int] = getattr(lowerCAmelCase__ , normalizer_state.pop("type"))
SCREAMING_SNAKE_CASE_: Optional[Any] = do_lower_case
SCREAMING_SNAKE_CASE_: List[str] = strip_accents
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE_: Optional[int] = normalizer_class(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = do_lower_case
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any]=None):
SCREAMING_SNAKE_CASE_: List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
SCREAMING_SNAKE_CASE_: Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_: int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None):
SCREAMING_SNAKE_CASE_: Any = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
| 127 | 1 |
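A hedged illustration (toy ids, not the real vocabulary) of what the two helpers above produce for a sentence pair: a [CLS] A [SEP] B [SEP] layout, with token type ids of 0 over the first segment and 1 over the second:

cls_id, sep_id = 101, 102  # placeholder ids, assumed for illustration
a_ids, b_ids = [7, 8], [9]
input_ids = [cls_id] + a_ids + [sep_id] + b_ids + [sep_id]
token_type_ids = [0] * (len(a_ids) + 2) + [1] * (len(b_ids) + 1)
print(input_ids)       # [101, 7, 8, 102, 9, 102]
print(token_type_ids)  # [0, 0, 0, 0, 1, 1]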
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> int:
if n == 1 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return 0
elif n == 2:
return 1
else:
lowerCamelCase__ : List[str] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> int:
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Dict = 2
while digits < n:
index += 1
lowerCamelCase__ : Union[str, Any] = len(str(fibonacci(_UpperCAmelCase ) ) )
return index
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase = 1000 ) -> int:
return fibonacci_digits_index(_UpperCAmelCase )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 50 |
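A quick sanity check on the indexing convention above (plain arithmetic, not from the source): the first Fibonacci number with three digits is F(12) = 144, so `solution(3)` should return 12:

a, b, idx = 0, 1, 1  # b holds F(idx), with F(1) = 1
while len(str(b)) < 3:
    a, b = b, a + b
    idx += 1
print(idx, b)  # 12 144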
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> bool:
lowerCamelCase__ : List[str] = len(_UpperCAmelCase )
lowerCamelCase__ : str = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for every prefix of arr, a sum of zero can be formed by taking no elements,
    # hence the first column is True
for i in range(arr_len + 1 ):
lowerCamelCase__ : Tuple = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
lowerCamelCase__ : Dict = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
lowerCamelCase__ : str = subset[i - 1][j]
if arr[i - 1] <= j:
lowerCamelCase__ : Dict = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50 | 1 |
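A hedged worked check (a set-based variant of the same recurrence, not the table the source builds): with arr = [3, 34, 4, 12, 5, 2], a sum of 9 is reachable (4 + 5) while 30 is not, since the largest sum without 34 is 26:

def subset_sum_ok(arr, target):
    reachable = {0}  # sums formable from a prefix of arr
    for value in arr:
        reachable |= {s + value for s in reachable if s + value <= target}
    return target in reachable

print(subset_sum_ok([3, 34, 4, 12, 5, 2], 9))   # True
print(subset_sum_ok([3, 34, 4, 12, 5, 2], 30))  # False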
# Algorithm for the pigeonhole sorting
def _a ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = min(SCREAMING_SNAKE_CASE__ ) # min() finds the minimum value
SCREAMING_SNAKE_CASE__ : Dict = max(SCREAMING_SNAKE_CASE__ ) # max() finds the maximum value
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
SCREAMING_SNAKE_CASE__ : int = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
for count in range(SCREAMING_SNAKE_CASE__ ):
while holes[count] > 0:
holes[count] -= 1
SCREAMING_SNAKE_CASE__ : Optional[int] = count + min_val
i += 1
def _a ( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(SCREAMING_SNAKE_CASE__ )
print("Sorted order is:" , " ".join(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
main()
| 191 |
class lowerCamelCase :
"""simple docstring"""
def __init__( self : int, _UpperCAmelCase : Dict, _UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = name
SCREAMING_SNAKE_CASE__ : Tuple = val
def __str__( self : str ) -> List[Any]:
"""simple docstring"""
return F'''{self.__class__.__name__}({self.name}, {self.val})'''
def __lt__( self : str, _UpperCAmelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
return self.val < other.val
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Optional[Any], _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
SCREAMING_SNAKE_CASE__ : str = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.build_heap(_UpperCAmelCase )
def __getitem__( self : Union[str, Any], _UpperCAmelCase : Any ) -> Dict:
"""simple docstring"""
return self.get_value(_UpperCAmelCase )
def A_ ( self : int, _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
return (idx - 1) // 2
def A_ ( self : Optional[Any], _UpperCAmelCase : Any ) -> Dict:
"""simple docstring"""
return idx * 2 + 1
def A_ ( self : str, _UpperCAmelCase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return idx * 2 + 2
def A_ ( self : Tuple, _UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
return self.heap_dict[key]
def A_ ( self : Optional[int], _UpperCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = len(_UpperCAmelCase ) - 1
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_parent_idx(_UpperCAmelCase )
for idx, i in enumerate(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ : Optional[int] = idx
SCREAMING_SNAKE_CASE__ : Optional[Any] = i.val
for i in range(_UpperCAmelCase, -1, -1 ):
self.sift_down(_UpperCAmelCase, _UpperCAmelCase )
return array
def A_ ( self : List[Any], _UpperCAmelCase : Dict, _UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
while True:
SCREAMING_SNAKE_CASE__ : str = self.get_left_child_idx(_UpperCAmelCase ) # noqa: E741
SCREAMING_SNAKE_CASE__ : Tuple = self.get_right_child_idx(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = idx
if l < len(_UpperCAmelCase ) and array[l] < array[idx]:
SCREAMING_SNAKE_CASE__ : List[Any] = l
if r < len(_UpperCAmelCase ) and array[r] < array[smallest]:
SCREAMING_SNAKE_CASE__ : int = r
if smallest != idx:
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Any = array[smallest], array[idx]
                SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[Any] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
SCREAMING_SNAKE_CASE__ : Optional[Any] = smallest
else:
break
def A_ ( self : Union[str, Any], _UpperCAmelCase : str ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.get_parent_idx(_UpperCAmelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Any = self.heap[idx], self.heap[p]
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : int = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
SCREAMING_SNAKE_CASE__ : Dict = p
SCREAMING_SNAKE_CASE__ : List[str] = self.get_parent_idx(_UpperCAmelCase )
def A_ ( self : str ) -> List[str]:
"""simple docstring"""
return self.heap[0]
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[int] = self.heap[-1], self.heap[0]
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[Any] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
SCREAMING_SNAKE_CASE__ : Any = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0, self.heap )
return x
def A_ ( self : Union[str, Any], _UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
self.heap.append(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = len(self.heap ) - 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = node.val
self.sift_up(len(self.heap ) - 1 )
def A_ ( self : Optional[int] ) -> int:
"""simple docstring"""
return len(self.heap ) == 0
def A_ ( self : Any, _UpperCAmelCase : Tuple, _UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
SCREAMING_SNAKE_CASE__ : Tuple = new_value
SCREAMING_SNAKE_CASE__ : List[Any] = new_value
self.sift_up(self.idx_of_element[node] )
_lowerCamelCase : Tuple = Node('''R''', -1)
_lowerCamelCase : int = Node('''B''', 6)
_lowerCamelCase : str = Node('''A''', 3)
_lowerCamelCase : Optional[Any] = Node('''X''', 1)
_lowerCamelCase : str = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
_lowerCamelCase : int = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -1_7)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 191 | 1 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Dict = ["""audio_values""", """audio_mask"""]
def __init__( self , __lowercase=2_048 , __lowercase=1 , __lowercase=[16, 16] , __lowercase=128 , __lowercase=44_100 , __lowercase=86 , __lowercase=2_048 , __lowercase=0.0 , **__lowercase , ) -> Optional[int]:
super().__init__(
feature_size=__lowercase , sampling_rate=__lowercase , padding_value=__lowercase , **__lowercase , )
__UpperCamelCase :Any = spectrogram_length
__UpperCamelCase :Tuple = num_channels
__UpperCamelCase :Tuple = patch_size
__UpperCamelCase :Any = feature_size // self.patch_size[1]
__UpperCamelCase :Union[str, Any] = n_fft
__UpperCamelCase :Any = sampling_rate // hop_length_to_sampling_rate
__UpperCamelCase :int = sampling_rate
__UpperCamelCase :List[str] = padding_value
__UpperCamelCase :List[Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowercase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=__lowercase , norm='''slaney''' , mel_scale='''slaney''' , ).T
def UpperCamelCase__ ( self , __lowercase) -> np.ndarray:
__UpperCamelCase :str = spectrogram(
__lowercase , window_function(self.n_fft , '''hann''') , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
__UpperCamelCase :Union[str, Any] = log_spec[:, :-1]
__UpperCamelCase :int = log_spec - 20.0
__UpperCamelCase :Optional[int] = np.clip(log_spec / 40.0 , -2.0 , 0.0) + 1.0
return log_spec
def __call__( self , __lowercase , __lowercase = None , __lowercase = True , __lowercase = None , __lowercase = False , __lowercase = False , **__lowercase , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'''This feature extractor is set to support sampling rate'''
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""")
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''')
__UpperCamelCase :Dict = isinstance(__lowercase , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
__UpperCamelCase :str = is_batched_numpy or (
isinstance(__lowercase , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
__UpperCamelCase :Optional[int] = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowercase , np.ndarray):
__UpperCamelCase :int = np.asarray(__lowercase , dtype=np.floataa)
elif isinstance(__lowercase , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
__UpperCamelCase :Optional[int] = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
__UpperCamelCase :Optional[int] = [np.asarray([raw_speech]).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__UpperCamelCase :List[Any] = [
self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __lowercase):
__UpperCamelCase :List[str] = [np.asarray(__lowercase , dtype=np.floataa) for feature in audio_features]
# Create audio attention mask
__UpperCamelCase :Dict = max(
[ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]) # The maximum number of audio patches in a batch
if return_attention_mask:
__UpperCamelCase :List[Any] = [
(ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
for feature in audio_features
]
__UpperCamelCase :Union[str, Any] = np.array(__lowercase).astype(np.floataa)
# convert into correct format for padding
__UpperCamelCase :str = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__UpperCamelCase :List[str] = np.ones([len(__lowercase), 1, max_time_len, self.feature_size]).astype(np.floataa)
__UpperCamelCase :List[Any] = padded_audio_features * self.padding_value
for i in range(len(__lowercase)):
__UpperCamelCase :Optional[int] = audio_features[i]
__UpperCamelCase :Any = feature
# return as BatchFeature
if return_attention_mask:
__UpperCamelCase :Optional[Any] = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
else:
__UpperCamelCase :Optional[Any] = {'''audio_values''': padded_audio_features}
__UpperCamelCase :Any = BatchFeature(data=__lowercase , tensor_type=__lowercase)
return encoded_inputs
| 43 |
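A hedged numeric aside (not from the source) on the post-processing in `_np_extract_fbank_features` above: subtracting 20 dB, dividing by 40, clipping to [-2, 0], and adding 1 maps any dB spectrogram into [-1.0, 1.0]:

import numpy as np

db = np.array([-60.0, 20.0, 100.0])  # quiet ... loud, in dB
out = np.clip((db - 20.0) / 40.0, -2.0, 0.0) + 1.0
print(out)  # [-1.  1.  1.]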
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : Any = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
_A = 'pegasus'
_A = ['past_key_values']
_A = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self :Dict , a :Dict=5_0_2_6_5 , a :Dict=1_0_2_4 , a :Union[str, Any]=1_2 , a :Any=4_0_9_6 , a :str=1_6 , a :str=1_2 , a :Optional[Any]=4_0_9_6 , a :int=1_6 , a :Optional[int]=0.0 , a :Optional[int]=0.0 , a :List[Any]=True , a :Union[str, Any]=True , a :int="gelu" , a :Dict=1_0_2_4 , a :List[Any]=0.1 , a :List[str]=0.0 , a :List[Any]=0.0 , a :str=0.02 , a :int=0 , a :Any=False , a :Dict=0 , a :int=1 , a :Optional[Any]=1 , **a :Optional[int] , ) -> str:
__UpperCamelCase : List[Any] = vocab_size
__UpperCamelCase : Union[str, Any] = max_position_embeddings
__UpperCamelCase : str = d_model
__UpperCamelCase : Dict = encoder_ffn_dim
__UpperCamelCase : int = encoder_layers
__UpperCamelCase : int = encoder_attention_heads
__UpperCamelCase : List[Any] = decoder_ffn_dim
__UpperCamelCase : List[Any] = decoder_layers
__UpperCamelCase : List[str] = decoder_attention_heads
__UpperCamelCase : str = dropout
__UpperCamelCase : Union[str, Any] = attention_dropout
__UpperCamelCase : List[str] = activation_dropout
__UpperCamelCase : Optional[Any] = activation_function
__UpperCamelCase : Tuple = init_std
__UpperCamelCase : Optional[int] = encoder_layerdrop
__UpperCamelCase : Union[str, Any] = decoder_layerdrop
__UpperCamelCase : Optional[Any] = use_cache
__UpperCamelCase : Union[str, Any] = encoder_layers
__UpperCamelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=a , eos_token_id=a , is_encoder_decoder=a , decoder_start_token_id=a , forced_eos_token_id=a , **a , )
@property
def _lowerCamelCase ( self :Dict ) -> int:
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self :Optional[Any] ) -> int:
        return self.d_model
| 232 | 0 |
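The `attribute_map` above lets generic names such as `hidden_size` resolve to model-specific fields; a minimal hedged stand-in for that aliasing (an illustrative class, not the real `PretrainedConfig` machinery):

class AliasedConfig:
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self, d_model=1_024):
        self.d_model = d_model

    def __getattr__(self, name):  # reached only when normal lookup fails
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

print(AliasedConfig().hidden_size)  # 1024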
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__A = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def lowerCAmelCase_ ( __a ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: Any =list(s_dict.keys() )
for key in keys:
lowerCamelCase__: List[Any] =R".*/layers_(\d+)"
lowerCamelCase__: Optional[Any] =key
if re.match(__a , __a ):
lowerCamelCase__: str =re.sub(R"layers_(\d+)" , R"block/\1/layer" , __a )
lowerCamelCase__: Any =R"(encoder|decoder)\/"
if re.match(__a , __a ):
lowerCamelCase__: Union[str, Any] =re.match(__a , __a ).groups()
if groups[0] == "encoder":
lowerCamelCase__: Union[str, Any] =re.sub(R"/mlp/" , R"/1/mlp/" , __a )
lowerCamelCase__: Optional[int] =re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , __a )
elif groups[0] == "decoder":
lowerCamelCase__: Tuple =re.sub(R"/mlp/" , R"/2/mlp/" , __a )
lowerCamelCase__: Optional[int] =re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , __a )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
lowerCamelCase__: Tuple =new_key.replace(__a , __a )
print(F"""{key} -> {new_key}""" )
lowerCamelCase__: Any =s_dict.pop(__a )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCamelCase__: List[Any] =s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCamelCase__: str =s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
lowerCamelCase__: int =s_dict[key].shape[0]
lowerCamelCase__: Dict =s_dict[key]
for idx in range(__a ):
lowerCamelCase__: Any =expert_weihts[idx]
print(F"""{key} -> {key.replace("expert/" , "nested fstring" )}""" )
s_dict.pop(__a )
return s_dict
__A = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def lowerCAmelCase_ ( __a , __a ) -> List[Any]:
"""simple docstring"""
import regex as re
with open(__a , "r" ) as f:
lowerCamelCase__: Optional[int] =f.read()
lowerCamelCase__: int =re.findall(R"(.*) = ([0-9.]*)" , __a )
lowerCamelCase__: Optional[Any] ={}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
lowerCamelCase__: Any =float(__a ) if "." in value else int(__a )
lowerCamelCase__: str =re.findall(R"(.*activations) = \(\'(.*)\',\)" , __a )[0]
lowerCamelCase__: Tuple =str(activation[1] )
lowerCamelCase__: List[str] =num_experts
lowerCamelCase__: List[str] =SwitchTransformersConfig(**__a )
return config
def lowerCAmelCase_ ( __a , __a , __a=None , __a="./" , __a=8 ) -> List[Any]:
"""simple docstring"""
print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
lowerCamelCase__: str =checkpoints.load_tax_checkpoint(__a )
if gin_file is not None:
lowerCamelCase__: int =convert_gin_to_config(__a , __a )
else:
lowerCamelCase__: Dict =SwitchTransformersConfig.from_pretrained(__a )
lowerCamelCase__: str =SwitchTransformersForConditionalGeneration(__a )
lowerCamelCase__: str =flax_params["target"]
lowerCamelCase__: Tuple =flatten_dict(__a , sep="/" )
lowerCamelCase__: Tuple =rename_keys(__a )
lowerCamelCase__: Union[str, Any] =unflatten_dict(__a , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(__a , __a )
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(__a )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
__A = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 273 |
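A hedged mini-demo (synthetic key, not a real checkpoint entry) of the regex renaming pass above, turning a flattened T5X path into the expected PyTorch-style block path:

import re

key = "encoder/layers_3/attention/key/kernel"
key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)
key = key.replace("/attention/", "/0/SelfAttention/").replace("/key/", "/k/")
print(key)  # encoder/block/3/layer/0/SelfAttention/k/kernel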
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "distilbert"
lowercase_ = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__(self : Any , UpperCAmelCase_ : str=30_522 , UpperCAmelCase_ : Union[str, Any]=512 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Optional[Any]=6 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : Any=768 , UpperCAmelCase_ : List[Any]=4 * 768 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Optional[int]=0.2 , UpperCAmelCase_ : int=0 , **UpperCAmelCase_ : List[Any] , ) ->Any:
'''simple docstring'''
lowerCamelCase__: int =vocab_size
lowerCamelCase__: Any =max_position_embeddings
lowerCamelCase__: Optional[int] =sinusoidal_pos_embds
lowerCamelCase__: str =n_layers
lowerCamelCase__: str =n_heads
lowerCamelCase__: str =dim
lowerCamelCase__: Optional[Any] =hidden_dim
lowerCamelCase__: Dict =dropout
lowerCamelCase__: Optional[Any] =attention_dropout
lowerCamelCase__: int =activation
lowerCamelCase__: Dict =initializer_range
lowerCamelCase__: Optional[Any] =qa_dropout
lowerCamelCase__: int =seq_classif_dropout
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_)
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__: Dict ={0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__: Optional[int] ={0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
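# Hedged usage sketch for the config above: the `attribute_map` lets generic names
# (hidden_size, num_hidden_layers, ...) resolve to DistilBERT's own fields (dim, n_layers, ...).
# Assumes the `transformers` package is installed; the upstream class is `DistilBertConfig`.
from transformers import DistilBertConfig

config = DistilBertConfig(dim=768, n_layers=6, n_heads=12)
print(config.hidden_size, config.num_hidden_layers)  # 768 6 -- aliases of dim / n_layers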
| 273 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class lowercase ( UpperCamelCase__ ):
_a = "gpt_bigcode"
_a = ["past_key_values"]
_a = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _a=5_0257 , _a=1024 , _a=768 , _a=12 , _a=12 , _a=None , _a="gelu_pytorch_tanh" , _a=0.1 , _a=0.1 , _a=0.1 , _a=1e-5 , _a=0.02 , _a=True , _a=True , _a=5_0256 , _a=5_0256 , _a=True , _a=True , _a=True , **_a , ) -> Union[str, Any]:
_A : List[str] = vocab_size
_A : List[Any] = n_positions
_A : Union[str, Any] = n_embd
_A : Any = n_layer
_A : Tuple = n_head
_A : List[str] = n_inner
_A : Optional[int] = activation_function
_A : Optional[int] = resid_pdrop
_A : List[str] = embd_pdrop
_A : int = attn_pdrop
_A : str = layer_norm_epsilon
_A : int = initializer_range
_A : List[str] = scale_attn_weights
_A : Optional[int] = use_cache
_A : Any = attention_softmax_in_fpaa
_A : Dict = scale_attention_softmax_in_fpaa
_A : Any = multi_query
_A : Any = bos_token_id
_A : Union[str, Any] = eos_token_id
super().__init__(bos_token_id=_a , eos_token_id=_a , **_a )
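# Hedged sketch: configs built on `PretrainedConfig` round-trip through a plain dict, so the
# fields set in `__init__` above survive serialization. Assumes `transformers` is installed
# and exposes the upstream `GPTBigCodeConfig`.
from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(n_layer=24, n_head=16, multi_query=True)
restored = GPTBigCodeConfig.from_dict(config.to_dict())
assert restored.n_layer == 24 and restored.multi_query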
| 26 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = 1
@register_to_config
def __init__( self , _a=2000 , _a=0.1 , _a=20 , _a=1e-3 ) -> List[Any]:
_A : Dict = None
_A : List[Any] = None
_A : Dict = None
def a__ ( self , _a , _a = None ) -> Union[str, Any]:
_A : Union[str, Any] = torch.linspace(1 , self.config.sampling_eps , _a , device=_a )
def a__ ( self , _a , _a , _a , _a=None ) -> Dict:
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
_A : Any = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
_A : List[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
_A : List[str] = std.flatten()
while len(std.shape ) < len(score.shape ):
_A : List[Any] = std.unsqueeze(-1 )
_A : int = -score / std
# compute
_A : Tuple = -1.0 / len(self.timesteps )
_A : str = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
_A : List[str] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
_A : Union[str, Any] = beta_t.unsqueeze(-1 )
_A : Tuple = -0.5 * beta_t * x
_A : Tuple = torch.sqrt(_a )
_A : Dict = drift - diffusion**2 * score
_A : Dict = x + drift * dt
# add noise
_A : Any = randn_tensor(x.shape , layout=x.layout , generator=_a , device=x.device , dtype=x.dtype )
_A : str = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ) -> Optional[Any]:
return self.config.num_train_timesteps
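# Worked example of the VP-SDE marginal statistics used in `step_pred` above: with
# beta_min=0.1 and beta_max=20, log_mean_coeff(t) = -0.25*t**2*(beta_max - beta_min)
# - 0.5*t*beta_min and std(t) = sqrt(1 - exp(2*log_mean_coeff(t))). Standalone sketch,
# no scheduler instance needed.
import math

def vp_sde_std(t, beta_min=0.1, beta_max=20.0):
    log_mean_coeff = -0.25 * t**2 * (beta_max - beta_min) - 0.5 * t * beta_min
    return math.sqrt(1.0 - math.exp(2.0 * log_mean_coeff))

for t in (0.1, 0.5, 1.0):
    print(f"t={t}: std={vp_sde_std(t):.4f}")  # std approaches 1 as t -> 1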
| 26 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __a (__UpperCamelCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Any = 42
_SCREAMING_SNAKE_CASE :Any = 42
class __a (__UpperCamelCase , __UpperCamelCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = 1
@register_to_config
def __init__( self , _a = 2_000 , _a = 0.15 , _a = 0.01 , _a = 1_348.0 , _a = 1E-5 , _a = 1 , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sigma_max
# setable values
SCREAMING_SNAKE_CASE__ : str = None
self.set_sigmas(_a , _a , _a , _a )
def _a ( self , _a , _a = None ) -> torch.FloatTensor:
"""simple docstring"""
return sample
def _a ( self , _a , _a = None , _a = None ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.linspace(1 , _a , _a , device=_a )
def _a ( self , _a , _a = None , _a = None , _a = None ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = sigma_min if sigma_min is not None else self.config.sigma_min
SCREAMING_SNAKE_CASE__ : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
SCREAMING_SNAKE_CASE__ : Tuple = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(_a , _a )
SCREAMING_SNAKE_CASE__ : Any = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
SCREAMING_SNAKE_CASE__ : List[str] = torch.exp(torch.linspace(math.log(_a ) , math.log(_a ) , _a ) )
SCREAMING_SNAKE_CASE__ : Any = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _a ( self , _a , _a ) -> Union[str, Any]:
"""simple docstring"""
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _a ( self , _a , _a , _a , _a = None , _a = True , ) -> Union[SdeVeOutput, Tuple]:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler""" )
SCREAMING_SNAKE_CASE__ : List[Any] = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
SCREAMING_SNAKE_CASE__ : List[str] = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
SCREAMING_SNAKE_CASE__ : Optional[int] = timesteps.to(self.discrete_sigmas.device )
SCREAMING_SNAKE_CASE__ : str = self.discrete_sigmas[timesteps].to(sample.device )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_adjacent_sigma(_a , _a ).to(sample.device )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.zeros_like(_a )
SCREAMING_SNAKE_CASE__ : str = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
SCREAMING_SNAKE_CASE__ : Dict = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
SCREAMING_SNAKE_CASE__ : List[Any] = diffusion.unsqueeze(-1 )
SCREAMING_SNAKE_CASE__ : List[str] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
SCREAMING_SNAKE_CASE__ : Optional[Any] = randn_tensor(
sample.shape , layout=sample.layout , generator=_a , device=sample.device , dtype=sample.dtype )
SCREAMING_SNAKE_CASE__ : Tuple = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
SCREAMING_SNAKE_CASE__ : Dict = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=_a , prev_sample_mean=_a )
def _a ( self , _a , _a , _a = None , _a = True , ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler""" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
SCREAMING_SNAKE_CASE__ : Union[str, Any] = randn_tensor(sample.shape , layout=sample.layout , generator=_a ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
SCREAMING_SNAKE_CASE__ : List[Any] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
SCREAMING_SNAKE_CASE__ : Dict = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
SCREAMING_SNAKE_CASE__ : List[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
SCREAMING_SNAKE_CASE__ : Tuple = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = step_size.unsqueeze(-1 )
SCREAMING_SNAKE_CASE__ : str = sample + step_size * model_output
SCREAMING_SNAKE_CASE__ : List[str] = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def _a ( self , _a , _a , _a , ) -> torch.FloatTensor:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE__ : Dict = self.discrete_sigmas.to(original_samples.device )[timesteps]
SCREAMING_SNAKE_CASE__ : Tuple = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(_a ) * sigmas[:, None, None, None]
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = noise + original_samples
return noisy_samples
def __len__( self ) -> Tuple:
"""simple docstring"""
return self.config.num_train_timesteps
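# Hedged sketch of the predictor-corrector loop that `step_correct` / `step_pred` above
# implement, using an exact analytic score for Gaussian toy data instead of a trained
# network. The sigma schedule and the Langevin step size (0.1 * sigma^2) are illustrative
# assumptions, not the scheduler's snr-based rule.
import math
import torch

def toy_score(x, sigma):
    return -x / (1.0 + sigma**2)  # exact score when data ~ N(0, 1) is blurred with N(0, sigma^2)

sigmas = torch.exp(torch.linspace(math.log(10.0), math.log(0.01), 50))
x = sigmas[0] * torch.randn(4)
for i in range(len(sigmas) - 1):
    # corrector: one Langevin step at the current noise level
    eps = 0.1 * sigmas[i] ** 2
    x = x + eps * toy_score(x, sigmas[i]) + (2 * eps).sqrt() * torch.randn(4)
    # predictor: reverse-diffusion step from sigma_i down to sigma_{i+1}
    step_var = sigmas[i] ** 2 - sigmas[i + 1] ** 2
    x = x + step_var * toy_score(x, sigmas[i]) + step_var.sqrt() * torch.randn(4)
print(x)  # samples should end up roughly standard normal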
| 351 |
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class __a (tf.keras.optimizers.schedules.LearningRateSchedule):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a = 1.0 , _a = None , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : List[Any] = initial_learning_rate
SCREAMING_SNAKE_CASE__ : Tuple = warmup_steps
SCREAMING_SNAKE_CASE__ : Optional[Any] = power
SCREAMING_SNAKE_CASE__ : Optional[Any] = decay_schedule_fn
SCREAMING_SNAKE_CASE__ : Any = name
def __call__( self , _a ) -> List[Any]:
"""simple docstring"""
with tf.name_scope(self.name or """WarmUp""" ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.cast(_a , tf.floataa )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.cast(self.warmup_steps , tf.floataa )
SCREAMING_SNAKE_CASE__ : str = global_step_float / warmup_steps_float
SCREAMING_SNAKE_CASE__ : Optional[int] = self.initial_learning_rate * tf.math.pow(_a , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=_a , )
def _a ( self ) -> List[Any]:
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 0.9 , __lowerCAmelCase = 0.999 , __lowerCAmelCase = 1E-8 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1.0 , __lowerCAmelCase = None , ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__lowerCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__lowerCAmelCase , )
if num_warmup_steps:
SCREAMING_SNAKE_CASE__ : Dict = WarmUp(
initial_learning_rate=__lowerCAmelCase , decay_schedule_fn=__lowerCAmelCase , warmup_steps=__lowerCAmelCase , )
if weight_decay_rate > 0.0:
SCREAMING_SNAKE_CASE__ : int = AdamWeightDecay(
learning_rate=__lowerCAmelCase , weight_decay_rate=__lowerCAmelCase , beta_a=__lowerCAmelCase , beta_a=__lowerCAmelCase , epsilon=__lowerCAmelCase , clipnorm=__lowerCAmelCase , global_clipnorm=__lowerCAmelCase , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=__lowerCAmelCase , )
else:
SCREAMING_SNAKE_CASE__ : int = tf.keras.optimizers.Adam(
learning_rate=__lowerCAmelCase , beta_a=__lowerCAmelCase , beta_a=__lowerCAmelCase , epsilon=__lowerCAmelCase , clipnorm=__lowerCAmelCase , global_clipnorm=__lowerCAmelCase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class __a (UpperCamelCase_):
'''simple docstring'''
def __init__( self , _a = 0.001 , _a = 0.9 , _a = 0.999 , _a = 1E-7 , _a = False , _a = 0.0 , _a = None , _a = None , _a = "AdamWeightDecay" , **_a , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_a , _a , _a , _a , _a , _a , **_a )
SCREAMING_SNAKE_CASE__ : Tuple = weight_decay_rate
SCREAMING_SNAKE_CASE__ : Tuple = include_in_weight_decay
SCREAMING_SNAKE_CASE__ : Dict = exclude_from_weight_decay
@classmethod
def _a ( cls , _a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = {"""WarmUp""": WarmUp}
return super(_a , cls ).from_config(_a , custom_objects=_a )
def _a ( self , _a , _a , _a ) -> str:
"""simple docstring"""
super(_a , self )._prepare_local(_a , _a , _a )
SCREAMING_SNAKE_CASE__ : Tuple = tf.constant(
self.weight_decay_rate , name="""adam_weight_decay_rate""" )
def _a ( self , _a , _a , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , )
return tf.no_op()
def _a ( self , _a , _a=None , **_a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = list(zip(*_a ) )
return super(_a , self ).apply_gradients(zip(_a , _a ) , name=_a , **_a )
def _a ( self , _a , _a , _a ) -> str:
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
SCREAMING_SNAKE_CASE__ : Dict = apply_state or {}
SCREAMING_SNAKE_CASE__ : List[str] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._fallback_apply_state(_a , _a )
SCREAMING_SNAKE_CASE__ : List[Any] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def _a ( self , _a , _a , _a=None ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , _a )
SCREAMING_SNAKE_CASE__ : Any = self._decay_weights_op(_a , _a , _a )
with tf.control_dependencies([decay] ):
return super(_a , self )._resource_apply_dense(_a , _a , **_a )
def _a ( self , _a , _a , _a , _a=None ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self._get_lr(var.device , var.dtype.base_dtype , _a )
SCREAMING_SNAKE_CASE__ : Dict = self._decay_weights_op(_a , _a , _a )
with tf.control_dependencies([decay] ):
return super(_a , self )._resource_apply_sparse(_a , _a , _a , **_a )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = super().get_config()
config.update({"""weight_decay_rate""": self.weight_decay_rate} )
return config
def _a ( self , _a ) -> Tuple:
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(_a , _a ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(_a , _a ) is not None:
return False
return True
class __a (UpperCamelCase_):
'''simple docstring'''
def __init__( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : List[str] = None
@property
def _a ( self ) -> str:
"""simple docstring"""
if self._accum_steps is None:
SCREAMING_SNAKE_CASE__ : Dict = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=_a , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def _a ( self ) -> List[str]:
"""simple docstring"""
if not self._gradients:
raise ValueError("""The accumulator should be called first to initialize the gradients""" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , _a ) -> str:
"""simple docstring"""
if not self._gradients:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(_a ) , trainable=_a , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(_a ) != len(self._gradients ):
raise ValueError(f'''Expected {len(self._gradients )} gradients, but got {len(_a )}''' )
for accum_gradient, gradient in zip(self._gradients , _a ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(_a )
self._accum_steps.assign_add(1 )
def _a ( self ) -> Any:
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(_a ) )
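# Hedged usage sketch for the `WarmUp` schedule defined above: wrap a polynomial decay and
# evaluate the learning rate at a few steps. Keyword names follow the upstream
# `transformers` implementation of this class; assumes TensorFlow 2.x is installed.
decay = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=1e-4, decay_steps=900, end_learning_rate=0.0
)
schedule = WarmUp(initial_learning_rate=1e-4, decay_schedule_fn=decay, warmup_steps=100)
for step in (1, 50, 100, 500, 1000):
    print(step, float(schedule(step)))  # ramps linearly to 1e-4, then decays polynomially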
| 56 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case ( A__ ,A__ ,A__ ):
# Initialise PyTorch model
UpperCAmelCase_ : Any = RemBertConfig.from_json_file(A__ )
print("Building PyTorch model from configuration: {}".format(str(A__ ) ) )
UpperCAmelCase_ : Tuple = RemBertModel(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(A__ ,A__ ,A__ )
# Save pytorch-model
print("Save PyTorch model to {}".format(A__ ) )
torch.save(model.state_dict() ,A__ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase_ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 268 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCamelCase_ = None
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase_ = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
lowerCamelCase_ = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
lowerCamelCase_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class UpperCamelCase_ (__A ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ['''input_ids''', '''attention_mask''']
__magic_name__ = MBartTokenizer
__magic_name__ = []
__magic_name__ = []
def __init__( self : List[str] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : Optional[Any]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : str="<s>" , lowerCAmelCase_ : List[Any]="<unk>" , lowerCAmelCase_ : Tuple="<pad>" , lowerCAmelCase_ : Union[str, Any]="<mask>" , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : Any , ) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
super().__init__(
vocab_file=lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , )
UpperCAmelCase_ : Tuple = vocab_file
UpperCAmelCase_ : str = False if not self.vocab_file else True
UpperCAmelCase_ : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
UpperCAmelCase_ : Tuple = {
lang_code: self.convert_tokens_to_ids(lowerCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCAmelCase_ : int = src_lang if src_lang is not None else "en_XX"
UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCAmelCase_ : Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self._src_lang
@src_lang.setter
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> None:
UpperCAmelCase_ : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : List[str] = [self.sep_token_id]
UpperCAmelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] , lowerCAmelCase_ : Optional[str] , **lowerCAmelCase_ : Union[str, Any] ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCAmelCase_ : str = src_lang
UpperCAmelCase_ : str = self(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = self.convert_tokens_to_ids(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = tgt_lang_id
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str = "en_XX" , lowerCAmelCase_ : Optional[List[str]] = None , lowerCAmelCase_ : str = "ro_RO" , **lowerCAmelCase_ : Dict , ) -> BatchEncoding:
UpperCAmelCase_ : List[Any] = src_lang
UpperCAmelCase_ : Tuple = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
return self.set_src_lang_special_tokens(self.src_lang )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[Any] ) -> None:
UpperCAmelCase_ : int = self.convert_tokens_to_ids(lowerCAmelCase_ )
UpperCAmelCase_ : str = []
UpperCAmelCase_ : str = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase_ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase_ : Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str ) -> None:
UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase_ : Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
UpperCAmelCase_ : List[str] = os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
return (out_vocab_file,)
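# Hedged usage sketch for the fast tokenizer above (downloads the checkpoint on first use):
# switching `src_lang` re-applies the special-token template, so the language code is
# appended after </s> on the source side, matching `set_src_lang_special_tokens`.
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
ids = tok("UN Chief says there is no military solution in Syria").input_ids
print(tok.convert_ids_to_tokens(ids)[-2:])  # ['</s>', 'en_XX']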
| 268 | 1 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowercase : List[Any] = HUGGINGFACE_HUB_CACHE
lowercase : Any = """config.json"""
lowercase : Optional[Any] = """diffusion_pytorch_model.bin"""
lowercase : Dict = """diffusion_flax_model.msgpack"""
lowercase : Tuple = """model.onnx"""
lowercase : Optional[int] = """diffusion_pytorch_model.safetensors"""
lowercase : Any = """weights.pb"""
lowercase : Union[str, Any] = """https://huggingface.co"""
lowercase : str = default_cache_path
lowercase : Dict = """diffusers_modules"""
lowercase : str = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowercase : Tuple = ["""fp16""", """non-ema"""]
lowercase : Union[str, Any] = """.self_attn"""
| 352 |
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> int:
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and number_of_steps > 0
), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
if number_of_steps == 1:
return 1
lowercase , lowercase : Tuple = 1, 1
for _ in range(number_of_steps - 1 ):
lowercase , lowercase : str = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
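    # quick sanity check: with steps of size 1 or 2 the counts follow the Fibonacci sequence
    print([_snake_case(n) for n in range(1, 7)])  # [1, 2, 3, 5, 8, 13]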
| 285 | 0 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def _lowerCAmelCase ( lowerCAmelCase_ :Union[str, Any] , lowerCAmelCase_ :List[str] , lowerCAmelCase_ :int )->Any:
'''simple docstring'''
snake_case_ = hf_hub_url(repo_id=__a , path=__a , revision=__a )
assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(__a )}'''
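# Illustrative, non-parametrized expansion of the check above for one combination;
# `quote` percent-encodes the blank in the filename.
from urllib.parse import quote
repo_id, path, revision = "org-name/dataset-name", "filename with blanks.csv", None
expected = f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
print(expected)  # .../resolve/main/filename%20with%20blanks.csv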
| 159 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def lowerCAmelCase_ ( __a ) -> YolosConfig:
"""simple docstring"""
lowerCamelCase__: str =YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase__: int =192
lowerCamelCase__: Optional[int] =768
lowerCamelCase__: Any =12
lowerCamelCase__: str =3
lowerCamelCase__: Optional[int] =[800, 1333]
lowerCamelCase__: Union[str, Any] =False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__: int =330
lowerCamelCase__: Optional[Any] =14
lowerCamelCase__: Any =6
lowerCamelCase__: List[str] =1320
elif "yolos_s" in yolos_name:
lowerCamelCase__: List[str] =384
lowerCamelCase__: Union[str, Any] =1536
lowerCamelCase__: List[Any] =12
lowerCamelCase__: Any =6
elif "yolos_b" in yolos_name:
lowerCamelCase__: str =[800, 1344]
lowerCamelCase__: int =91
lowerCamelCase__: str ="huggingface/label-files"
lowerCamelCase__: List[str] ="coco-detection-id2label.json"
lowerCamelCase__: Tuple =json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) )
lowerCamelCase__: Dict ={int(__a ): v for k, v in idalabel.items()}
lowerCamelCase__: List[str] =idalabel
lowerCamelCase__: int ={v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( __a , __a , __a = False ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__: Optional[int] =state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowerCamelCase__: Dict =state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__: Union[str, Any] =in_proj_weight[: config.hidden_size, :]
lowerCamelCase__: str =in_proj_bias[: config.hidden_size]
lowerCamelCase__: str =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__: str =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__: Optional[int] =in_proj_weight[-config.hidden_size :, :]
lowerCamelCase__: List[Any] =in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( __a ) -> str:
"""simple docstring"""
if "backbone" in name:
lowerCamelCase__: Optional[Any] =name.replace("backbone" , "vit" )
if "cls_token" in name:
lowerCamelCase__: Optional[int] =name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowerCamelCase__: str =name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowerCamelCase__: Tuple =name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowerCamelCase__: Any =name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCamelCase__: List[Any] =name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowerCamelCase__: Union[str, Any] =name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowerCamelCase__: Any =name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase__: Optional[int] =name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase__: int =name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase__: int =name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase__: List[str] =name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase__: Any =name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowerCamelCase__: Dict =name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowerCamelCase__: List[str] =name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowerCamelCase__: Any =name.replace("vit.norm" , "vit.layernorm" )
return name
def lowerCAmelCase_ ( __a , __a ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCamelCase__: Any =orig_state_dict.pop(__a )
if "qkv" in key:
lowerCamelCase__: Tuple =key.split("." )
lowerCamelCase__: List[str] =int(key_split[2] )
lowerCamelCase__: Tuple =model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase__: int =val[:dim, :]
lowerCamelCase__: str =val[
dim : dim * 2, :
]
lowerCamelCase__: Any =val[-dim:, :]
else:
lowerCamelCase__: Tuple =val[:dim]
lowerCamelCase__: Optional[Any] =val[dim : dim * 2]
lowerCamelCase__: str =val[-dim:]
else:
lowerCamelCase__: Dict =val
return orig_state_dict
def lowerCAmelCase_ ( ) -> torch.Tensor:
"""simple docstring"""
lowerCamelCase__: Any ="http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase__: Optional[Any] =Image.open(requests.get(__a , stream=__a ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: int =get_yolos_config(__a )
# load original state_dict
lowerCamelCase__: Optional[int] =torch.load(__a , map_location="cpu" )["model"]
# load 🤗 model
lowerCamelCase__: int =YolosForObjectDetection(__a )
model.eval()
lowerCamelCase__: Union[str, Any] =convert_state_dict(__a , __a )
model.load_state_dict(__a )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase__: Any =800 if yolos_name != "yolos_ti" else 512
lowerCamelCase__: Tuple =YolosImageProcessor(format="coco_detection" , size=__a )
lowerCamelCase__: str =image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase__: Tuple =model(**__a )
lowerCamelCase__ , lowerCamelCase__: List[str] =outputs.logits, outputs.pred_boxes
lowerCamelCase__ , lowerCamelCase__: Any =None, None
if yolos_name == "yolos_ti":
lowerCamelCase__: Optional[Any] =torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
lowerCamelCase__: List[Any] =torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase__: Optional[int] =torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
lowerCamelCase__: Any =torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase__: str =torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
lowerCamelCase__: Optional[Any] =torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__: str =torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
lowerCamelCase__: Union[str, Any] =torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
lowerCamelCase__: Tuple =torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
lowerCamelCase__: Optional[int] =torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __a , atol=1e-4 )
Path(__a ).mkdir(exist_ok=__a )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__a )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__a )
if push_to_hub:
lowerCamelCase__: Any ={
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowerCamelCase__: Optional[int] =model_mapping[yolos_name]
image_processor.push_to_hub(__a , organization="hustvl" )
model.push_to_hub(__a , organization="hustvl" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
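# Standalone sketch of the qkv split done in the conversion above: timm stores query/key/value
# as one fused (3*hidden, hidden) matrix, which is sliced into three (hidden, hidden) blocks.
# The hidden size here is an illustrative toy value.
import torch

hidden = 8
in_proj_weight = torch.randn(3 * hidden, hidden)
q = in_proj_weight[:hidden, :]
k = in_proj_weight[hidden : hidden * 2, :]
v = in_proj_weight[-hidden:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), in_proj_weight)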
| 10 | 0 |
"""simple docstring"""
import os
import numpy
import onnx
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> Any:
UpperCamelCase_: Union[str, Any] = a.name
UpperCamelCase_: Any = b.name
UpperCamelCase_: str = ''
UpperCamelCase_: Tuple = ''
UpperCamelCase_: Dict = a == b
UpperCamelCase_: List[str] = name_a
UpperCamelCase_: int = name_b
return res
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase__ , UpperCAmelCase__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase__ , UpperCAmelCase__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ )
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict:
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
UpperCamelCase_: List[Any] = list(model.graph.initializer )
UpperCamelCase_: Optional[int] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
UpperCamelCase_: Any = inits[i].name
UpperCamelCase_: int = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase__ , UpperCAmelCase__ )
def snake_case (UpperCAmelCase__ ) -> Tuple:
UpperCamelCase_: Optional[Any] = os.path.dirname(UpperCAmelCase__ )
UpperCamelCase_: Optional[Any] = os.path.basename(UpperCAmelCase__ )
UpperCamelCase_: Any = onnx.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) )
UpperCamelCase_: Tuple = list(model.graph.initializer )
UpperCamelCase_: Optional[Any] = set()
UpperCamelCase_: str = {}
UpperCamelCase_: Any = []
UpperCamelCase_: List[str] = 0
for i in range(len(UpperCAmelCase__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase__ )
dup_set.add(UpperCAmelCase__ )
UpperCamelCase_: int = inits[j].data_type
UpperCamelCase_: Optional[int] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 1_1:
mem_size *= 8
else:
print('unexpected data type: ' , UpperCAmelCase__ )
total_reduced_size += mem_size
UpperCamelCase_: Tuple = inits[i].name
UpperCamelCase_: int = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase__ )
else:
UpperCamelCase_: Dict = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' )
UpperCamelCase_: Union[str, Any] = sorted(UpperCAmelCase__ )
_remove_dup_initializers_from_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: Tuple = 'optimized_' + model_file_name
UpperCamelCase_: Optional[Any] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
onnx.save(UpperCAmelCase__ , UpperCAmelCase__ )
    return new_model
| 353 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Tuple = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    A_ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 292 | 0 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
a =get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
class A_ ( a__ , unittest.TestCase ):
_UpperCAmelCase : Optional[Any] = BartphoTokenizer
_UpperCAmelCase : Any = False
_UpperCAmelCase : str = True
def lowerCAmelCase ( self : Union[str, Any]):
super().setUp()
__lowerCamelCase : Any = ['▁This', '▁is', '▁a', '▁t', 'est']
__lowerCamelCase : Optional[int] = dict(zip(SCREAMING_SNAKE_CASE_ ,range(len(SCREAMING_SNAKE_CASE_))))
__lowerCamelCase : List[Any] = {'unk_token': '<unk>'}
__lowerCamelCase : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['monolingual_vocab_file'])
with open(self.monolingual_vocab_file ,'w' ,encoding='utf-8') as fp:
for token in vocab_tokens:
fp.write(F"{token} {vocab_tokens[token]}\n")
__lowerCamelCase : List[str] = BartphoTokenizer(SCREAMING_SNAKE_CASE_ ,self.monolingual_vocab_file ,**self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCAmelCase ( self : int ,**SCREAMING_SNAKE_CASE__ : Optional[Any]):
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE_)
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any]):
__lowerCamelCase : Optional[Any] = 'This is a là test'
__lowerCamelCase : List[Any] = 'This is a<unk><unk> test'
return input_text, output_text
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : List[Any] = BartphoTokenizer(SCREAMING_SNAKE_CASE_ ,self.monolingual_vocab_file ,**self.special_tokens_map)
__lowerCamelCase : Optional[int] = 'This is a là test'
__lowerCamelCase : Any = '▁This ▁is ▁a ▁l à ▁t est'.split()
__lowerCamelCase : Dict = tokenizer.tokenize(SCREAMING_SNAKE_CASE_)
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_)
__lowerCamelCase : str = tokens + [tokenizer.unk_token]
__lowerCamelCase : int = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) ,SCREAMING_SNAKE_CASE_)
| 73 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Any = "canine"
def __init__( self, SCREAMING_SNAKE_CASE_=768, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=3072, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=1_6384, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=1e-12, SCREAMING_SNAKE_CASE_=0, SCREAMING_SNAKE_CASE_=0XE000, SCREAMING_SNAKE_CASE_=0XE001, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=8, SCREAMING_SNAKE_CASE_=1_6384, SCREAMING_SNAKE_CASE_=128, **SCREAMING_SNAKE_CASE_, ) -> int:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_, bos_token_id=SCREAMING_SNAKE_CASE_, eos_token_id=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = max_position_embeddings
UpperCamelCase : Tuple = hidden_size
UpperCamelCase : Union[str, Any] = num_hidden_layers
UpperCamelCase : Optional[int] = num_attention_heads
UpperCamelCase : Tuple = intermediate_size
UpperCamelCase : List[str] = hidden_act
UpperCamelCase : Union[str, Any] = hidden_dropout_prob
UpperCamelCase : Optional[int] = attention_probs_dropout_prob
UpperCamelCase : Optional[Any] = initializer_range
UpperCamelCase : Tuple = type_vocab_size
UpperCamelCase : Any = layer_norm_eps
# Character config:
UpperCamelCase : List[Any] = downsampling_rate
UpperCamelCase : Optional[int] = upsampling_kernel_size
UpperCamelCase : Tuple = num_hash_functions
UpperCamelCase : Union[str, Any] = num_hash_buckets
UpperCamelCase : str = local_transformer_stride
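# Hedged sketch of what `downsampling_rate` means above: CANINE strided-pools the character
# sequence before its deep encoder, so the effective length is max_position_embeddings
# divided by downsampling_rate. Numbers mirror the defaults in `__init__`.
max_position_embeddings, downsampling_rate = 16_384, 4
print(max_position_embeddings // downsampling_rate)  # 4096 positions in the deep transformer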
| 119 | 0 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = RobertaTokenizer
UpperCamelCase__ = RobertaTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = {'''cls_token''': '''<s>'''}
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase_ : List[str] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowercase_ : Tuple = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
lowercase_ : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowercase_ : Dict = {"""unk_token""": """<unk>"""}
lowercase_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : str , **lowercase_ : int ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , **lowercase_ : int ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Optional[Any] ):
lowercase_ : Dict = """lower newer"""
lowercase_ : int = """lower newer"""
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Dict = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase_ : Tuple = """lower newer"""
lowercase_ : Dict = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
lowercase_ : List[str] = tokenizer.tokenize(lowercase_ ) # , add_prefix_space=True)
self.assertListEqual(lowercase_ , lowercase_ )
lowercase_ : Optional[int] = tokens + [tokenizer.unk_token]
lowercase_ : int = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : List[str] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=lowercase_ ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=lowercase_ ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : List[Any] = self.tokenizer_class.from_pretrained("""roberta-base""" )
lowercase_ : Any = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase_ )
lowercase_ : Tuple = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase_ )
lowercase_ : Dict = tokenizer.encode(
"""sequence builders""" , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
lowercase_ : Optional[Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
lowercase_ : List[str] = tokenizer.build_inputs_with_special_tokens(lowercase_ )
lowercase_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : Tuple = """Encode this sequence."""
lowercase_ : str = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
lowercase_ : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase_ , lowercase_ )
lowercase_ : List[str] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
lowercase_ : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase_ , lowercase_ )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
lowercase_ : Dict = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
lowercase_ : Tuple = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase_ , lowercase_ )
# Testing spaces after special tokens
lowercase_ : Dict = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ )} ) # mask token has a left space
lowercase_ : Dict = tokenizer.convert_tokens_to_ids(lowercase_ )
lowercase_ : Dict = """Encode <mask> sequence"""
lowercase_ : int = """Encode <mask>sequence"""
lowercase_ : Optional[Any] = tokenizer.encode(lowercase_ )
lowercase_ : List[str] = encoded.index(lowercase_ )
lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase_ , lowercase_ )
lowercase_ : List[Any] = tokenizer.encode(lowercase_ )
lowercase_ : Optional[Any] = encoded.index(lowercase_ )
lowercase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase_ : List[str] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase_ : Union[str, Any] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase_ : Tuple = """A, <mask> AllenNLP sentence."""
lowercase_ : str = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
lowercase_ : Union[str, Any] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                # attention_mask should put 1 everywhere, so its sum divided by the length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
lowercase_ : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
lowercase_ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
lowercase_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
lowercase_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowercase_ : Dict = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowercase_ : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , lowercase_ )
self.assertEqual(post_processor_state["""add_prefix_space"""] , lowercase_ )
self.assertEqual(post_processor_state["""trim_offsets"""] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase_ : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
lowercase_ : List[str] = f'''{text_of_1_token} {text_of_1_token}'''
lowercase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase_ : List[Any] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ) + 1, len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase_ : Tuple = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase_ : Optional[Any] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ) + 1, len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase_ : int = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase_ : int = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ), len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase_ : List[str] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase_ : Union[str, Any] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ), len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase_ : Union[str, Any] = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase_ : List[str] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase_ ) + 1, 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase_ : List[str] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase_ ), 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase_ : Any = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase_ : Any = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase_ ), 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
| 21 | '''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_lowercase : Union[str, Any] = "src/transformers"
_lowercase : str = "docs/source/en"
_lowercase : Union[str, Any] = "."
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> int:
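    # Find the text between `start_prompt` and `end_prompt` in the given file, returning that block
    # together with its start/end line indices and the full list of lines (see the call site below).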
with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase_ : Union[str, Any] = f.readlines()
# Find the start prompt.
lowercase_ : Optional[Any] = 0
while not lines[start_index].startswith(UpperCAmelCase__ ):
start_index += 1
start_index += 1
lowercase_ : int = start_index
while not lines[end_index].startswith(UpperCAmelCase__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_lowercase : int = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_lowercase : str = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_lowercase : Optional[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will also match any TF or Flax model, so it needs to be checked in an else branch after the two previous regexes.
_lowercase : int = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
_lowercase : Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH)
def lowerCamelCase ( UpperCAmelCase__ : int ) -> Any:
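    # Split a CamelCase name into its component words, e.g. "TFBertModel" -> ["TF", "Bert", "Model"].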
lowercase_ : str = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , UpperCAmelCase__ )
return [m.group(0 ) for m in matches]
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> List[Any]:
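    # Center `text` in a cell of the given width; ✅ and ❌ are counted as two characters wide
    # so the emoji columns stay visually aligned.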
lowercase_ : Dict = 2 if text == """✅""" or text == """❌""" else len(UpperCAmelCase__ )
lowercase_ : List[str] = (width - text_length) // 2
lowercase_ : Dict = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowerCamelCase ( ) -> Any:
lowercase_ : int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowercase_ : Any = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
lowercase_ : int = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()}
    # Dictionaries flagging whether each model prefix has a slow/fast tokenizer and a backend in PT/TF/Flax.
lowercase_ : List[Any] = collections.defaultdict(UpperCAmelCase__ )
lowercase_ : List[str] = collections.defaultdict(UpperCAmelCase__ )
lowercase_ : Any = collections.defaultdict(UpperCAmelCase__ )
lowercase_ : Tuple = collections.defaultdict(UpperCAmelCase__ )
lowercase_ : Optional[int] = collections.defaultdict(UpperCAmelCase__ )
# Let's lookup through all transformers object (once).
for attr_name in dir(UpperCAmelCase__ ):
lowercase_ : Union[str, Any] = None
if attr_name.endswith("""Tokenizer""" ):
lowercase_ : Optional[int] = slow_tokenizers
lowercase_ : Union[str, Any] = attr_name[:-9]
elif attr_name.endswith("""TokenizerFast""" ):
lowercase_ : Optional[Any] = fast_tokenizers
lowercase_ : Dict = attr_name[:-13]
elif _re_tf_models.match(UpperCAmelCase__ ) is not None:
lowercase_ : str = tf_models
lowercase_ : str = _re_tf_models.match(UpperCAmelCase__ ).groups()[0]
elif _re_flax_models.match(UpperCAmelCase__ ) is not None:
lowercase_ : List[str] = flax_models
lowercase_ : int = _re_flax_models.match(UpperCAmelCase__ ).groups()[0]
elif _re_pt_models.match(UpperCAmelCase__ ) is not None:
lowercase_ : Tuple = pt_models
lowercase_ : Optional[int] = _re_pt_models.match(UpperCAmelCase__ ).groups()[0]
if lookup_dict is not None:
while len(UpperCAmelCase__ ) > 0:
if attr_name in model_name_to_prefix.values():
lowercase_ : int = True
break
# Try again after removing the last word in the name
lowercase_ : Optional[Any] = """""".join(camel_case_split(UpperCAmelCase__ )[:-1] )
# Let's build that table!
lowercase_ : Dict = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
lowercase_ : Optional[Any] = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
lowercase_ : Union[str, Any] = [len(UpperCAmelCase__ ) + 2 for c in columns]
lowercase_ : int = max([len(UpperCAmelCase__ ) for name in model_names] ) + 2
# Build the table per se
lowercase_ : Tuple = """|""" + """|""".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for c, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + """|\n"""
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n"
lowercase_ : int = {True: """✅""", False: """❌"""}
for name in model_names:
lowercase_ : str = model_name_to_prefix[name]
lowercase_ : Any = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for l, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + "|\n"
return table
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any]=False ) -> str:
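    # Regenerate the model support table in `index.md`; overwrite it in place or raise if it is stale.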
lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = _find_text_in_file(
filename=os.path.join(UpperCAmelCase__ , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , )
lowercase_ : Dict = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(UpperCAmelCase__ , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"""The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
_lowercase : Any = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_lowercase : Optional[Any] = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 21 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :str = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = "sew-d"
def __init__( self : List[str] ,A : Dict=32 ,A : Optional[int]=7_68 ,A : Dict=12 ,A : Union[str, Any]=12 ,A : Any=30_72 ,A : List[str]=2 ,A : List[str]=5_12 ,A : str=2_56 ,A : int=True ,A : Any=True ,A : Tuple=("p2c", "c2p") ,A : str="layer_norm" ,A : List[str]="gelu_python" ,A : Union[str, Any]=0.1 ,A : Optional[Any]=0.1 ,A : Dict=0.1 ,A : Optional[int]=0.0 ,A : List[str]=0.1 ,A : int=0.02 ,A : Any=1E-7 ,A : List[Any]=1E-5 ,A : str="group" ,A : int="gelu" ,A : List[str]=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) ,A : Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,A : Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,A : Union[str, Any]=False ,A : Dict=1_28 ,A : Dict=16 ,A : Optional[Any]=True ,A : List[str]=0.05 ,A : Dict=10 ,A : Tuple=2 ,A : List[Any]=0.0 ,A : List[str]=10 ,A : List[Any]=0 ,A : Dict="mean" ,A : str=False ,A : int=False ,A : Optional[int]=2_56 ,A : Optional[int]=0 ,A : List[Any]=1 ,A : Tuple=2 ,**A : Dict ,):
super().__init__(**A ,pad_token_id=A ,bos_token_id=A ,eos_token_id=A )
__A = hidden_size
__A = feat_extract_norm
__A = feat_extract_activation
__A = list(A )
__A = list(A )
__A = list(A )
__A = conv_bias
__A = num_conv_pos_embeddings
__A = num_conv_pos_embedding_groups
__A = len(self.conv_dim )
__A = num_hidden_layers
__A = intermediate_size
__A = squeeze_factor
__A = max_position_embeddings
__A = position_buckets
__A = share_att_key
__A = relative_attention
__A = norm_rel_ebd
__A = list(A )
__A = hidden_act
__A = num_attention_heads
__A = hidden_dropout
__A = attention_dropout
__A = activation_dropout
__A = feat_proj_dropout
__A = final_dropout
__A = layer_norm_eps
__A = feature_layer_norm_eps
__A = initializer_range
__A = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A = apply_spec_augment
__A = mask_time_prob
__A = mask_time_length
__A = mask_time_min_masks
__A = mask_feature_prob
__A = mask_feature_length
__A = mask_feature_min_masks
# ctc loss
__A = ctc_loss_reduction
__A = ctc_zero_infinity
# sequence classification
__A = use_weighted_layer_sum
__A = classifier_proj_size
@property
def UpperCamelCase_ ( self : List[str] ):
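        # Overall downsampling factor of the convolutional feature extractor (the product of all strides).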
return functools.reduce(operator.mul ,self.conv_stride ,1 )
| 15 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Any = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : List[Any] = '''informer'''
__UpperCamelCase : List[str] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : str = "student_t" , lowerCAmelCase_ : str = "nll" , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : List[int] = None , lowerCAmelCase_ : Optional[Union[str, bool]] = "mean" , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : int = 6_4 , lowerCAmelCase_ : int = 3_2 , lowerCAmelCase_ : int = 3_2 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : str = "gelu" , lowerCAmelCase_ : float = 0.05 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : int = 1_0_0 , lowerCAmelCase_ : float = 0.02 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : str = "prob" , lowerCAmelCase_ : int = 5 , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : str , ):
"""simple docstring"""
# time series specific configuration
_A: Optional[Any] = prediction_length
_A: Optional[Any] = context_length or prediction_length
_A: Dict = distribution_output
_A: List[str] = loss
_A: int = input_size
_A: List[str] = num_time_features
_A: Optional[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
_A: str = scaling
_A: Optional[Any] = num_dynamic_real_features
_A: List[Any] = num_static_real_features
_A: Tuple = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(lowerCAmelCase_ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
_A: str = cardinality
else:
_A: Union[str, Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCAmelCase_ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
_A: List[str] = embedding_dimension
else:
_A: Union[str, Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
_A: int = num_parallel_samples
# Transformer architecture configuration
_A: Union[str, Any] = input_size * len(self.lags_sequence ) + self._number_of_features
_A: Union[str, Any] = d_model
_A: Optional[Any] = encoder_attention_heads
_A: Optional[Any] = decoder_attention_heads
_A: Optional[Any] = encoder_ffn_dim
_A: Union[str, Any] = decoder_ffn_dim
_A: Any = encoder_layers
_A: str = decoder_layers
_A: List[str] = dropout
_A: Any = attention_dropout
_A: Optional[int] = activation_dropout
_A: List[Any] = encoder_layerdrop
_A: str = decoder_layerdrop
_A: int = activation_function
_A: Tuple = init_std
_A: Union[str, Any] = use_cache
# Informer
_A: Union[str, Any] = attention_type
_A: str = sampling_factor
_A: List[str] = distil
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 121 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
a : Any ="deberta-v2"
def __init__( self , snake_case__=128_100 , snake_case__=1_536 , snake_case__=24 , snake_case__=24 , snake_case__=6_144 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=0 , snake_case__=0.02 , snake_case__=1e-7 , snake_case__=False , snake_case__=-1 , snake_case__=0 , snake_case__=True , snake_case__=None , snake_case__=0 , snake_case__="gelu" , **snake_case__ , ):
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : Dict = num_attention_heads
lowerCAmelCase : Optional[Any] = intermediate_size
lowerCAmelCase : str = hidden_act
lowerCAmelCase : List[str] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase : Any = max_position_embeddings
lowerCAmelCase : List[str] = type_vocab_size
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : str = relative_attention
lowerCAmelCase : Optional[int] = max_relative_positions
lowerCAmelCase : List[str] = pad_token_id
lowerCAmelCase : Dict = position_biased_input
# Backwards compatibility
if type(UpperCamelCase__ ) == str:
lowerCAmelCase : int = [x.strip() for x in pos_att_type.lower().split("|" )]
lowerCAmelCase : Dict = pos_att_type
lowerCAmelCase : Optional[int] = vocab_size
lowerCAmelCase : Optional[int] = layer_norm_eps
lowerCAmelCase : Union[str, Any] = kwargs.get("pooler_hidden_size" , UpperCamelCase__ )
lowerCAmelCase : Union[str, Any] = pooler_dropout
lowerCAmelCase : Optional[Any] = pooler_hidden_act
class SCREAMING_SNAKE_CASE__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@property
def lowercase__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCAmelCase : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCAmelCase : str = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
else:
return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
@property
def lowercase__ ( self ):
"""simple docstring"""
return 12
def lowercase__ ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , snake_case__ = 3 , snake_case__ = 40 , snake_case__ = 40 , snake_case__ = None , ):
"""simple docstring"""
lowerCAmelCase : int = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 367 |
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def a__ ( SCREAMING_SNAKE_CASE : str ): # picklable for multiprocessing
'''simple docstring'''
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def a__ ( ):
'''simple docstring'''
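    # Entering the context should register the joblibspark backend under "spark";
    # unsupported backend names are expected to raise.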
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
lowerCAmelCase : List[str] = [1, 2, 3]
with pytest.raises(SCREAMING_SNAKE_CASE ):
with parallel_backend("unsupported backend" ):
map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=2 )
with pytest.raises(SCREAMING_SNAKE_CASE ):
with parallel_backend("unsupported backend" ):
map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def a__ ( SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : Tuple = [1, 2]
lowerCAmelCase : int = {"a": 1, "b": 2}
lowerCAmelCase : List[str] = {"a": [1, 2], "b": [3, 4]}
lowerCAmelCase : Dict = {"a": {"1": 1}, "b": 2}
lowerCAmelCase : Tuple = {"a": 1, "b": 2, "c": 3, "d": 4}
lowerCAmelCase : Any = [2, 3]
lowerCAmelCase : Any = {"a": 2, "b": 3}
lowerCAmelCase : Optional[int] = {"a": [2, 3], "b": [4, 5]}
lowerCAmelCase : Optional[int] = {"a": {"1": 2}, "b": 3}
lowerCAmelCase : str = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
| 133 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
"""simple docstring"""
if isinstance(__a , torch.Tensor ):
return image
elif isinstance(__a , PIL.Image.Image ):
snake_case_ : str = [image]
if isinstance(image[0] , PIL.Image.Image ):
snake_case_ : List[str] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
snake_case_ : Any = np.concatenate(__a , axis=0 )
snake_case_ : str = np.array(__a ).astype(np.floataa ) / 255.0
snake_case_ : Tuple = image.transpose(0 , 3 , 1 , 2 )
snake_case_ : Union[str, Any] = 2.0 * image - 1.0
snake_case_ : Any = torch.from_numpy(__a )
elif isinstance(image[0] , torch.Tensor ):
snake_case_ : Dict = torch.cat(__a , dim=0 )
return image
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=0.9_995 ) -> str:
"""simple docstring"""
if not isinstance(__a , np.ndarray ):
snake_case_ : int = True
snake_case_ : Dict = va.device
snake_case_ : List[Any] = va.cpu().numpy()
snake_case_ : List[str] = va.cpu().numpy()
snake_case_ : Optional[int] = np.sum(va * va / (np.linalg.norm(__a ) * np.linalg.norm(__a )) )
if np.abs(__a ) > DOT_THRESHOLD:
snake_case_ : str = (1 - t) * va + t * va
else:
snake_case_ : List[str] = np.arccos(__a )
snake_case_ : Tuple = np.sin(__a )
snake_case_ : str = theta_a * t
snake_case_ : List[Any] = np.sin(__a )
snake_case_ : Tuple = np.sin(theta_a - theta_t ) / sin_theta_a
snake_case_ : int = sin_theta_t / sin_theta_a
snake_case_ : List[Any] = sa * va + sa * va
if inputs_are_torch:
snake_case_ : int = torch.from_numpy(__a ).to(__a )
return va
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case_ : str = F.normalize(__a , dim=-1 )
snake_case_ : Union[str, Any] = F.normalize(__a , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
"""simple docstring"""
for param in model.parameters():
snake_case_ : Any = value
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , clip_model=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , coca_model=UpperCAmelCase_ , coca_tokenizer=UpperCAmelCase_ , coca_transform=UpperCAmelCase_ , )
snake_case_ : Tuple = (
feature_extractor.size
if isinstance(feature_extractor.size , UpperCAmelCase_ )
else feature_extractor.size["shortest_edge"]
)
snake_case_ : int = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , UpperCAmelCase_ )
set_requires_grad(self.clip_model , UpperCAmelCase_ )
def lowerCamelCase (self , __magic_name__ = "auto" ) -> Union[str, Any]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case_ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase_ )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
self.enable_attention_slicing(UpperCAmelCase_ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
set_requires_grad(self.vae , UpperCAmelCase_ )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
set_requires_grad(self.vae , UpperCAmelCase_ )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
set_requires_grad(self.unet , UpperCAmelCase_ )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
set_requires_grad(self.unet , UpperCAmelCase_ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
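        # Skip the first part of the schedule according to `strength`, as in img2img:
        # strength == 1.0 keeps all `num_inference_steps` steps, smaller values keep fewer.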
snake_case_ : str = min(int(num_inference_steps * strength ) , UpperCAmelCase_ )
snake_case_ : Tuple = max(num_inference_steps - init_timestep , 0 )
snake_case_ : List[str] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ) -> Union[str, Any]:
'''simple docstring'''
if not isinstance(UpperCAmelCase_ , torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase_ )}''' )
snake_case_ : Optional[int] = image.to(device=UpperCAmelCase_ , dtype=UpperCAmelCase_ )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ : Any = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase_ )
]
snake_case_ : List[Any] = torch.cat(UpperCAmelCase_ , dim=0 )
else:
snake_case_ : Tuple = self.vae.encode(UpperCAmelCase_ ).latent_dist.sample(UpperCAmelCase_ )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
snake_case_ : List[str] = 0.18_215 * init_latents
snake_case_ : Optional[int] = init_latents.repeat_interleave(UpperCAmelCase_ , dim=0 )
snake_case_ : List[Any] = randn_tensor(init_latents.shape , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_ )
# get latents
snake_case_ : str = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ : Optional[int] = init_latents
return latents
def lowerCamelCase (self , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
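        # Caption the image with the CoCa model so the caption can stand in for a missing text prompt.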
snake_case_ : Optional[int] = self.coca_transform(UpperCAmelCase_ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
snake_case_ : str = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
snake_case_ : str = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = self.feature_extractor.preprocess(UpperCAmelCase_ )
snake_case_ : Optional[Any] = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
snake_case_ : Any = self.clip_model.get_image_features(UpperCAmelCase_ )
snake_case_ : int = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCAmelCase_ )
snake_case_ : Union[str, Any] = image_embeddings_clip.repeat_interleave(UpperCAmelCase_ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> Any:
'''simple docstring'''
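        # One CLIP-guidance step: predict x_0 from the current latents, decode it with the VAE,
        # and adjust the noise prediction along the gradient of the CLIP spherical distance loss.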
snake_case_ : str = latents.detach().requires_grad_()
snake_case_ : int = self.scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
# predict the noise residual
snake_case_ : str = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
snake_case_ : List[Any] = self.scheduler.alphas_cumprod[timestep]
snake_case_ : Tuple = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case_ : int = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
snake_case_ : Any = torch.sqrt(UpperCAmelCase_ )
snake_case_ : Optional[int] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , UpperCAmelCase_ ):
snake_case_ : Any = self.scheduler.sigmas[index]
snake_case_ : List[str] = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
snake_case_ : Any = 1 / 0.18_215 * sample
snake_case_ : Any = self.vae.decode(UpperCAmelCase_ ).sample
snake_case_ : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
snake_case_ : List[Any] = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase_ )
snake_case_ : Dict = self.normalize(UpperCAmelCase_ ).to(latents.dtype )
snake_case_ : Union[str, Any] = self.clip_model.get_image_features(UpperCAmelCase_ )
snake_case_ : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCAmelCase_ )
snake_case_ : List[Any] = spherical_dist_loss(UpperCAmelCase_ , UpperCAmelCase_ ).mean() * clip_guidance_scale
snake_case_ : Dict = -torch.autograd.grad(UpperCAmelCase_ , UpperCAmelCase_ )[0]
if isinstance(self.scheduler , UpperCAmelCase_ ):
snake_case_ : Dict = latents.detach() + grads * (sigma**2)
snake_case_ : List[str] = noise_pred_original
else:
snake_case_ : int = noise_pred_original - torch.sqrt(UpperCAmelCase_ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__(self , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = 512 , __magic_name__ = 512 , __magic_name__ = 0.6 , __magic_name__ = 50 , __magic_name__ = 7.5 , __magic_name__ = 1 , __magic_name__ = 0.0 , __magic_name__ = 100 , __magic_name__ = None , __magic_name__ = "pil" , __magic_name__ = True , __magic_name__ = 0.8 , __magic_name__ = 0.1 , __magic_name__ = 0.1 , ) -> Any:
'''simple docstring'''
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and len(UpperCAmelCase_ ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(UpperCAmelCase_ )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(UpperCAmelCase_ , torch.Generator ) and batch_size > 1:
snake_case_ : Optional[Any] = [generator] + [None] * (batch_size - 1)
snake_case_ : Optional[Any] = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
snake_case_ : str = [x[0] for x in coca_is_none if x[1]]
snake_case_ : Optional[int] = ", ".join(UpperCAmelCase_ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCAmelCase_ ):
raise ValueError(
                    F'''Content prompt is None and CoCa [{coca_is_none_str}] is None. '''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
snake_case_ : List[str] = self.get_image_description(UpperCAmelCase_ )
if style_prompt is None:
if len(UpperCAmelCase_ ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
snake_case_ : Optional[int] = self.get_image_description(UpperCAmelCase_ )
# get prompt text embeddings for content and style
snake_case_ : int = self.tokenizer(
UpperCAmelCase_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=UpperCAmelCase_ , return_tensors='''pt''' , )
snake_case_ : Optional[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
snake_case_ : Tuple = self.tokenizer(
UpperCAmelCase_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=UpperCAmelCase_ , return_tensors='''pt''' , )
snake_case_ : Any = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
snake_case_ : Tuple = slerp(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# duplicate text embeddings for each generation per prompt
snake_case_ : Dict = text_embeddings.repeat_interleave(UpperCAmelCase_ , dim=0 )
# set timesteps
snake_case_ : Dict = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
snake_case_ : str = {}
if accepts_offset:
snake_case_ : Tuple = 1
self.scheduler.set_timesteps(UpperCAmelCase_ , **UpperCAmelCase_ )
# Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
snake_case_ : Optional[Any] = self.get_timesteps(UpperCAmelCase_ , UpperCAmelCase_ , self.device )
snake_case_ : Optional[Any] = timesteps[:1].repeat(UpperCAmelCase_ )
# Preprocess image
snake_case_ : List[Any] = preprocess(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ : List[str] = self.prepare_latents(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , text_embeddings.dtype , self.device , UpperCAmelCase_ )
snake_case_ : Tuple = preprocess(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ : int = self.prepare_latents(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , text_embeddings.dtype , self.device , UpperCAmelCase_ )
snake_case_ : Tuple = slerp(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if clip_guidance_scale > 0:
snake_case_ : Optional[int] = self.get_clip_image_embeddings(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ : List[Any] = self.get_clip_image_embeddings(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ : Optional[Any] = slerp(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case_ : str = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case_ : Optional[Any] = content_text_input.input_ids.shape[-1]
snake_case_ : int = self.tokenizer([''''''] , padding='''max_length''' , max_length=UpperCAmelCase_ , return_tensors='''pt''' )
snake_case_ : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
snake_case_ : Optional[Any] = uncond_embeddings.repeat_interleave(UpperCAmelCase_ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ : str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated on the target device
        # for 1-to-1 reproducibility of results with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case_ : Any = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
snake_case_ : Tuple = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
snake_case_ : int = torch.randn(UpperCAmelCase_ , generator=UpperCAmelCase_ , device='''cpu''' , dtype=UpperCAmelCase_ ).to(
self.device )
else:
snake_case_ : Any = torch.randn(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=self.device , dtype=UpperCAmelCase_ )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
snake_case_ : List[str] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case_ : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case_ : Optional[Any] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case_ : Dict = {}
if accepts_eta:
snake_case_ : Optional[Any] = eta
# check if the scheduler accepts generator
snake_case_ : str = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
snake_case_ : Optional[int] = generator
with self.progress_bar(total=UpperCAmelCase_ ):
for i, t in enumerate(UpperCAmelCase_ ):
# expand the latents if we are doing classifier free guidance
snake_case_ : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case_ : Dict = self.scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
# predict the noise residual
snake_case_ : int = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
snake_case_ : Any = noise_pred.chunk(2 )
snake_case_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
snake_case_ : str = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
snake_case_ : Tuple = self.cond_fn(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
# compute the previous noisy sample x_t -> x_t-1
snake_case_ : Optional[int] = self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
snake_case_ : int = 1 / 0.18_215 * latents
snake_case_ : str = self.vae.decode(UpperCAmelCase_ ).sample
snake_case_ : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
snake_case_ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ : Union[str, Any] = self.numpy_to_pil(UpperCAmelCase_ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCAmelCase_ , nsfw_content_detected=UpperCAmelCase_ )
| 279 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__A = {
"yjernite/retribert-base-uncased": 512,
}
__A = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = PRETRAINED_INIT_CONFIGURATION
lowercase_ = RetriBertTokenizer
lowercase_ = ["input_ids", "attention_mask"]
def __init__(self : int , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Union[str, Any]="[UNK]" , UpperCAmelCase_ : Any="[SEP]" , UpperCAmelCase_ : List[str]="[PAD]" , UpperCAmelCase_ : Optional[Any]="[CLS]" , UpperCAmelCase_ : Optional[Any]="[MASK]" , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : str , ) ->List[Any]:
'''simple docstring'''
super().__init__(
UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCamelCase__: List[Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , UpperCAmelCase_) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase_) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase_) != tokenize_chinese_chars
):
lowerCamelCase__: Dict =getattr(UpperCAmelCase_ , normalizer_state.pop("type"))
lowerCamelCase__: int =do_lower_case
lowerCamelCase__: int =strip_accents
lowerCamelCase__: List[str] =tokenize_chinese_chars
lowerCamelCase__: Tuple =normalizer_class(**UpperCAmelCase_)
lowerCamelCase__: Any =do_lower_case
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any]=None) ->List[str]:
'''simple docstring'''
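        # Build model inputs in the BERT format: [CLS] A [SEP] (+ B [SEP] for sequence pairs).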
lowerCamelCase__: Optional[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
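        # BERT-style token type ids: 0 for the first sequence (and its special tokens), 1 for the second.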
lowerCamelCase__: Tuple =[self.sep_token_id]
lowerCamelCase__: Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
lowerCamelCase__: Tuple =self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_)
return tuple(UpperCAmelCase_)
| 10 | 0 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Union[str, Any]:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
if not is_sharded:
A : List[str] = os.path.abspath(_lowerCAmelCase )
logger.info(f'''Loading PyTorch weights from {pt_path}''' )
A : Optional[int] = torch.load(_lowerCAmelCase , map_location="""cpu""" )
logger.info(f'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
A : int = convert_pytorch_state_dict_to_flax(_lowerCAmelCase , _lowerCAmelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
A : int = convert_pytorch_sharded_state_dict_to_flax(_lowerCAmelCase , _lowerCAmelCase )
return flax_state_dict
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> (Tuple[str], np.ndarray):
"""simple docstring"""
def is_key_or_prefix_key_in_dict(_lowerCAmelCase ) -> bool:
return len(set(_lowerCAmelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
A : Union[str, Any] = pt_tuple_key[:-1] + ("""scale""",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
A : Optional[Any] = pt_tuple_key[:-1] + ("""mean""",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
A : int = pt_tuple_key[:-1] + ("""var""",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
A : Optional[Any] = pt_tuple_key[:-1] + ("""embedding""",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
A : Optional[Any] = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
A : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A : Any = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
A : List[str] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A : Union[str, Any] = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A : Dict = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
A : Optional[int] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
A : Optional[int] = pt_tuple_key[-2] + """_g"""
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
A : Union[str, Any] = pt_tuple_key[-2] + """_v"""
if name is not None:
A : Any = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
A : List[str] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
A : Union[str, Any] = flax_model.params["""params"""]
else:
A : Any = flax_model.params
A : Optional[int] = flatten_dict(_lowerCAmelCase )
    # add batch_stats keys/values to the dict
if "batch_stats" in flax_model.params:
A : Dict = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(_lowerCAmelCase )
A : str = {}
A : str = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
A : Dict = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A : int = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
A : Any = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
A : List[Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
A : Tuple = rename_key_and_reshape_tensor(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# add model prefix if necessary
A : Tuple = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
A : Union[str, Any] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
A : List[Any] = jnp.asarray(_lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
A : int = jnp.asarray(_lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
A : List[Any] = jnp.asarray(_lowerCAmelCase )
return unflatten_dict(_lowerCAmelCase )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
"""simple docstring"""
import torch
# Load the index
A : List[Any] = {}
for shard_file in shard_filenames:
# load using msgpack utils
A : str = torch.load(_lowerCAmelCase )
A : Any = {k: v.numpy() for k, v in pt_state_dict.items()}
A : Dict = flax_model.base_model_prefix
    # use the params dict if the model contains batch norm layers, then add the batch_stats keys/values to the dict
if "batch_stats" in flax_model.params:
A : str = flax_model.params["""params"""]
A : Any = flatten_dict(_lowerCAmelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
A : Dict = flax_model.params
A : Optional[int] = flatten_dict(_lowerCAmelCase )
A : str = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
A : Any = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A : List[Any] = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
A : Optional[Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
A : List[Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
A : Dict = rename_key_and_reshape_tensor(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# add model prefix if necessary
A : Optional[int] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
A : List[Any] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
A : List[str] = jnp.asarray(_lowerCAmelCase )
continue
if "var" in flax_key[-1]:
A : int = jnp.asarray(_lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
A : Union[str, Any] = jnp.asarray(_lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
A : List[str] = jnp.asarray(_lowerCAmelCase )
return unflatten_dict(_lowerCAmelCase )
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class (assumes the `transformers` module is imported at module level)
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
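# --- Added usage sketch (illustrative, not from the original module): loading a
# Flax ``.msgpack`` checkpoint into a PyTorch model with the helper above; the
# model class and path below are placeholders.
#
#     from transformers import BertModel
#     pt_model = BertModel.from_pretrained("bert-base-uncased")
#     pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "/path/to/flax_model.msgpack")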
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""

    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
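# --- Added illustration (not part of the original module): the kernel-layout
# rule applied above, shown with plain numpy. Flax stores conv kernels as
# (H, W, C_in, C_out) while PyTorch expects (C_out, C_in, H, W), and Flax dense
# kernels are the transpose of PyTorch Linear weights.
#
#     import numpy as np
#     flax_kernel = np.zeros((3, 3, 8, 16))              # (H, W, C_in, C_out)
#     assert np.transpose(flax_kernel, (3, 2, 0, 1)).shape == (16, 8, 3, 3)
#     flax_dense = np.zeros((128, 64))                   # (in_features, out_features)
#     assert flax_dense.T.shape == (64, 128)             # PyTorch Linear: (out, in)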
| 367 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of ``n`` in non-decreasing order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
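# Added check (not in the original file): the factors multiply back to n and
# come out in non-decreasing order, e.g. 360 = 2 * 2 * 2 * 3 * 3 * 5.
if __name__ == "__main__":
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]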
| 115 | 0 |
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        # int32 timesteps here are a de-obfuscation assumption for the mangled dtype
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
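# --- Added sketch (not part of the original tests): the smallest end-to-end use
# of the UNet2DModel configuration exercised above; requires diffusers + torch.
#
#     model = UNet2DModel(
#         sample_size=32, in_channels=3, out_channels=3,
#         layers_per_block=2, block_out_channels=(32, 64),
#         down_block_types=("DownBlock2D", "AttnDownBlock2D"),
#         up_block_types=("AttnUpBlock2D", "UpBlock2D"),
#     )
#     sample = torch.randn(1, 3, 32, 32)
#     out = model(sample, timestep=torch.tensor([10])).sample
#     assert out.shape == sample.shape  # diffusion UNets are shape-preserving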
| 66 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
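# --- Added illustration: how the compound-scaling table above is consumed; e.g.
# EfficientNet-B4 uses a 380px input with width/depth multipliers of 1.4/1.8.
#
#     _cfg = CONFIG_MAP["b4"]
#     assert (_cfg["image_size"], _cfg["width_coef"], _cfg["depth_coef"]) == (380, 1.4, 1.8)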
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
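# --- Added illustration: why ``permute(3, 2, 0, 1)`` above maps a Keras conv
# kernel laid out as (H, W, C_in, C_out) onto PyTorch's (C_out, C_in, H, W).
#
#     tf_kernel = torch.zeros(3, 3, 8, 16)
#     assert tuple(tf_kernel.permute(3, 2, 0, 1).shape) == (16, 8, 3, 3)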
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
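# Example invocation (illustrative; the script filename is an assumption):
#   python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model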
| 319 | 0 |
"""simple docstring"""
def _A (__a = 10 , __a = 10_00 , __a = True ) -> int:
"""simple docstring"""
assert (
isinstance(__a , __a )
and isinstance(__a , __a )
and isinstance(__a , __a )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('''Invalid value for min_val or max_val (min_value < max_value)''' )
return min_val if option else max_val
def _A (__a , __a ) -> int:
"""simple docstring"""
return int((number_a + number_a) / 2 )
def _A (__a , __a , __a ) -> None:
"""simple docstring"""
assert (
isinstance(__a , __a ) and isinstance(__a , __a ) and isinstance(__a , __a )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError('''argument value for lower and higher must be(lower > higher)''' )
if not lower < to_guess < higher:
raise ValueError(
'''guess value must be within the range of lower and higher value''' )
def answer(__a ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('''started...''' )
SCREAMING_SNAKE_CASE_ : Any = lower
SCREAMING_SNAKE_CASE_ : str = higher
SCREAMING_SNAKE_CASE_ : List[str] = []
while True:
SCREAMING_SNAKE_CASE_ : int = get_avg(__a , __a )
last_numbers.append(__a )
if answer(__a ) == "low":
SCREAMING_SNAKE_CASE_ : Optional[int] = number
elif answer(__a ) == "high":
SCREAMING_SNAKE_CASE_ : List[str] = number
else:
break
print(f'guess the number : {last_numbers[-1]}' )
print(f'details : {last_numbers!s}' )
def _A () -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = int(input('''Enter lower value : ''' ).strip() )
SCREAMING_SNAKE_CASE_ : str = int(input('''Enter high value : ''' ).strip() )
SCREAMING_SNAKE_CASE_ : Optional[Any] = int(input('''Enter value to guess : ''' ).strip() )
guess_the_number(__a , __a , __a )
if __name__ == "__main__":
main()
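# Added note (not in the original file): the search above is plain bisection, so
# it needs at most ceil(log2(higher - lower)) probes, e.g. 10 for (10, 1000):
#
#     import math
#     assert math.ceil(math.log2(1000 - 10)) == 10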
| 318 |
"""simple docstring"""
g = 9.80665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """
    Buoyant force on an object submerged in a fluid:
    force = fluid_density * gravity * displaced volume.
    """
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")

    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
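# Added worked example (not in the original file): fresh water (1000 kg/m^3)
# displacing 0.5 m^3 under standard gravity gives 1000 * 9.80665 * 0.5 = 4903.325 N.
if __name__ == "__main__":
    assert abs(archimedes_principle(1000, 0.5) - 4903.325) < 1e-9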
| 318 | 1 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP,
    and scales/unscales image embeddings accordingly.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
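# --- Added round-trip sketch (illustrative): unscale(scale(x)) recovers x, using
# the class name given in the rewrite above.
#
#     normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#     x = torch.randn(2, 768)
#     assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x, atol=1e-6)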
| 328 |
from __future__ import annotations
from collections import deque
class Automaton:
    """Aho-Corasick automaton for multi-keyword string matching."""

    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
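# Added usage example (not in the original file): each keyword maps to the start
# indices of its occurrences; "hat" is found inside "what" via the fail links.
if __name__ == "__main__":
    auto = Automaton(["what", "hat", "ver", "er"])
    assert auto.search_in("whatever") == {"what": [0], "hat": [1], "ver": [5], "er": [6]}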
| 328 | 1 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]

        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
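# --- Added note: in BlenderbotSmall's BPE vocabulary the ``@@`` suffix marks a
# subword continued by the next token, which is why ["adapt", "act", "ap@@", "te"]
# above detokenizes back to "adapt act apte".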
| 370 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> List[Any]:
'''simple docstring'''
__a =XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , lengths=__snake_case , langs=__snake_case )
__a =model(__snake_case , langs=__snake_case )
__a =model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[int]:
'''simple docstring'''
__a =XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Dict:
'''simple docstring'''
__a =XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
__a =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> List[Any]:
'''simple docstring'''
__a =XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
__a =model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((__a) , ) =result_with_labels.to_tuple()
__a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((__a) , ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[Any]:
'''simple docstring'''
__a =XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Any:
'''simple docstring'''
__a =self.num_labels
__a =XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Tuple:
'''simple docstring'''
__a =self.num_choices
__a =XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case=False ) -> str:
'''simple docstring'''
__a =super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__a =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
__a =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def __magic_name__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=False , __snake_case=1 ) -> Optional[Any]:
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
__a =min_length + idx + 1
__a =min_length + idx + 1
__a =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=False , __snake_case=1 ) -> Dict:
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
__a =min_length + idx + 1
__a =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
pass
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a =XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class __magic_name__ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(__snake_case )
__a =torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
__a =[
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__a =model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
| 308 | 0 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class lowercase ( unittest.TestCase ):
@require_torch
def _snake_case ( self ) -> List[str]:
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output), [
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
] , )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
] , )
@require_tf
def _snake_case ( self ) -> List[str]:
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
] , )
@slow
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output), [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
def _snake_case ( self ) -> Optional[Any]:
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output), [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
lowerCAmelCase = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
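Below is a minimal usage sketch of the zero-shot image-classification pipeline these tests exercise; the model id and image path are copied from the slow test and are assumptions about the local environment, not part of the dataset row.

```python
# Sketch only: the pipeline API asserted on in the tests above.
from transformers import pipeline

image_classifier = pipeline(
    task="zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",
)
result = image_classifier(
    "./tests/fixtures/tests_samples/COCO/000000039769.png",
    candidate_labels=["cat", "plane", "remote"],
)
# `result` is a list of {"score": float, "label": str} dicts sorted by
# score, e.g. [{"score": 0.511, "label": "remote"}, ...] as asserted above.
print(result)
```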
| 46 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
UpperCAmelCase : Optional[Any] = None
try:
import msvcrt
except ImportError:
UpperCAmelCase : List[Any] = None
try:
import fcntl
except ImportError:
UpperCAmelCase : int = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
UpperCAmelCase : Union[str, Any] = OSError
# Data
# ------------------------------------------------
UpperCAmelCase : List[Any] = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
UpperCAmelCase : Tuple = '3.0.12'
UpperCAmelCase : str = None
def a__ ( ):
"""simple docstring"""
global _logger
    _logger = _logger or logging.getLogger(__name__ )
return _logger
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = lock_file
return None
def __str__( self : str ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = f'The file lock \'{self.lock_file}\' could not be acquired.'
return temp
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = lock
return None
def __enter__( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.lock
def __exit__( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
"""simple docstring"""
self.lock.release()
return None
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str=-1 , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__SCREAMING_SNAKE_CASE = self.hash_filename_if_too_long(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# The path to the lock file.
__SCREAMING_SNAKE_CASE = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__SCREAMING_SNAKE_CASE = None
# The default timeout value.
__SCREAMING_SNAKE_CASE = timeout
# We use this lock primarily for the lock counter.
__SCREAMING_SNAKE_CASE = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__SCREAMING_SNAKE_CASE = 0
return None
@property
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return self._lock_file
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return self._timeout
@timeout.setter
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = float(__SCREAMING_SNAKE_CASE )
return None
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
raise NotImplementedError()
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@property
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return self._lock_file_fd is not None
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=0.05 ) -> Optional[Any]:
"""simple docstring"""
if timeout is None:
__SCREAMING_SNAKE_CASE = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__SCREAMING_SNAKE_CASE = id(self )
__SCREAMING_SNAKE_CASE = self._lock_file
__SCREAMING_SNAKE_CASE = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}' )
self._acquire()
if self.is_locked:
logger().debug(f'Lock {lock_id} acquired on {lock_filename}' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' )
time.sleep(__SCREAMING_SNAKE_CASE )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__SCREAMING_SNAKE_CASE = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> Dict:
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__SCREAMING_SNAKE_CASE = id(self )
__SCREAMING_SNAKE_CASE = self._lock_file
logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}' )
self._release()
__SCREAMING_SNAKE_CASE = 0
logger().debug(f'Lock {lock_id} released on {lock_filename}' )
return None
def __enter__( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
self.acquire()
return self
def __exit__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ) -> Tuple:
"""simple docstring"""
self.release()
return None
def __del__( self : str ) -> Union[str, Any]:
"""simple docstring"""
self.release(force=__SCREAMING_SNAKE_CASE )
return None
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = os.path.basename(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > max_length and max_length > 0:
__SCREAMING_SNAKE_CASE = os.path.dirname(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = str(hash(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = filename[: max_length - len(__SCREAMING_SNAKE_CASE ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return path
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict=-1 , __SCREAMING_SNAKE_CASE : Dict=None ) -> List[Any]:
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(__SCREAMING_SNAKE_CASE , timeout=__SCREAMING_SNAKE_CASE , max_filename_length=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__SCREAMING_SNAKE_CASE = os.open(self._lock_file , __SCREAMING_SNAKE_CASE )
except OSError:
pass
else:
try:
msvcrt.locking(__SCREAMING_SNAKE_CASE , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = fd
return None
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self._lock_file_fd
__SCREAMING_SNAKE_CASE = None
msvcrt.locking(__SCREAMING_SNAKE_CASE , msvcrt.LK_UNLCK , 1 )
os.close(__SCREAMING_SNAKE_CASE )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any]=-1 , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = os.statvfs(os.path.dirname(__SCREAMING_SNAKE_CASE ) ).f_namemax
super().__init__(__SCREAMING_SNAKE_CASE , timeout=__SCREAMING_SNAKE_CASE , max_filename_length=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__SCREAMING_SNAKE_CASE = os.open(self._lock_file , __SCREAMING_SNAKE_CASE )
try:
fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = fd
return None
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self._lock_file_fd
__SCREAMING_SNAKE_CASE = None
fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN )
os.close(__SCREAMING_SNAKE_CASE )
return None
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__SCREAMING_SNAKE_CASE = os.open(self._lock_file , __SCREAMING_SNAKE_CASE )
except OSError:
pass
else:
__SCREAMING_SNAKE_CASE = fd
return None
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
os.close(self._lock_file_fd )
__SCREAMING_SNAKE_CASE = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
UpperCAmelCase : Dict = None
if msvcrt:
UpperCAmelCase : Optional[int] = WindowsFileLock
elif fcntl:
UpperCAmelCase : Optional[Any] = UnixFileLock
else:
UpperCAmelCase : int = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
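A hedged usage sketch of the lock API defined above. The identifiers in this vendored copy were rewritten by the dataset transformation, so the sketch imports the canonical names from the upstream `filelock` package (same 3.0.12 API as the version string above); the lock path is illustrative.

```python
# Sketch only: typical FileLock usage; "app.lock" is an illustrative path.
from filelock import FileLock, Timeout

lock = FileLock("app.lock", timeout=5)  # acquire() waits up to 5 seconds
try:
    with lock:        # __enter__ -> acquire(), __exit__ -> release()
        with lock:    # re-entrant: the nested-lock counter just increments
            pass      # ... exclusive work here ...
except Timeout:
    print("another process holds 'app.lock'")
```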
| 267 | 0 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class snake_case_ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
lowercase__ : Tuple = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
lowercase__ : List[Any] = AutoTokenizer.from_pretrained("google/mt5-small" )
lowercase__ : Tuple = tokenizer("Hello there" , return_tensors="np" ).input_ids
lowercase__ : List[str] = tokenizer("Hi I am" , return_tensors="np" ).input_ids
lowercase__ : str = shift_tokens_right(a_ , model.config.pad_token_id , model.config.decoder_start_token_id )
lowercase__ : Dict = model(a_ , decoder_input_ids=a_ ).logits
lowercase__ : Union[str, Any] = optax.softmax_cross_entropy(a_ , onehot(a_ , logits.shape[-1] ) ).mean()
lowercase__ : Optional[Any] = -(labels.shape[-1] * loss.item())
lowercase__ : Dict = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
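The score asserted above is the negative total cross-entropy over the label tokens. A minimal sketch of that computation in isolation, with toy logits and labels standing in for real model output:

```python
# Sketch only: the loss-to-score arithmetic used in the test above.
import jax.numpy as jnp
import optax
from flax.training.common_utils import onehot

logits = jnp.zeros((1, 4, 10))       # (batch, seq_len, vocab) - toy values
labels = jnp.array([[1, 2, 3, 4]])   # (batch, seq_len)

loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
score = -(labels.shape[-1] * loss.item())  # negative total log-loss over the sequence
```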
| 359 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
for param, grad_param in zip(model_a.parameters() , model_b.parameters()):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True):
model.train()
lowercase__ : Tuple = model(_lowerCamelCase)
lowercase__ : Union[str, Any] = F.mse_loss(_lowerCamelCase , target.to(output.device))
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : str=False):
set_seed(42)
lowercase__ : Dict = RegressionModel()
lowercase__ : int = deepcopy(_lowerCamelCase)
lowercase__ : str = RegressionDataset(length=80)
lowercase__ : List[Any] = DataLoader(_lowerCamelCase , batch_size=16)
model.to(accelerator.device)
if sched:
lowercase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=1E-3)
lowercase__ : Union[str, Any] = AdamW(params=ddp_model.parameters() , lr=1E-3)
lowercase__ : Optional[int] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65)
lowercase__ : Union[str, Any] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65)
# Make a copy of `model`
if sched:
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
lowercase__ , lowercase__ : int = accelerator.prepare(_lowerCamelCase , _lowerCamelCase)
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowercase_ ( _lowerCamelCase : Tuple):
# Test when on a single CPU or GPU that the context manager does nothing
lowercase__ , lowercase__ , lowercase__ : List[Any] = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : int = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[int] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : int = ddp_input[torch.randperm(len(_lowerCamelCase))]
def lowercase_ ( _lowerCamelCase : Any):
# Test on distributed setup that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : Dict = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : List[str] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Any = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Tuple = ddp_input[torch.randperm(len(_lowerCamelCase))]
def lowercase_ ( _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Union[str, Any]=False):
lowercase__ : int = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : Optional[int] = get_training_setup(_lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : str = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCamelCase) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Dict = ddp_input[torch.randperm(len(_lowerCamelCase))]
GradientState._reset_state()
def lowercase_ ( _lowerCamelCase : List[str]=False , _lowerCamelCase : int=False):
lowercase__ : Dict = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase , _lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : Any = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Tuple = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : List[str] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCamelCase)):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowercase__ : Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCamelCase))
if accelerator.num_processes > 1:
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
GradientState._reset_state()
def lowercase_ ( ):
lowercase__ : List[str] = Accelerator()
lowercase__ : List[Any] = RegressionDataset(length=80)
lowercase__ : Tuple = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ : int = RegressionDataset(length=96)
lowercase__ : List[str] = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ , lowercase__ : Dict = accelerator.prepare(_lowerCamelCase , _lowerCamelCase)
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if iteration < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if batch_num < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowercase_ ( ):
lowercase__ : str = Accelerator()
lowercase__ : Dict = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**")
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**")
test_noop_sync(_lowerCamelCase)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**")
test_distributed_sync(_lowerCamelCase)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(_lowerCamelCase , _lowerCamelCase)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0") or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Any):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
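For reference, a condensed, self-contained sketch of the two accumulation idioms this script tests: the manual `no_sync` context manager and the higher-level `accumulate` wrapper. The throwaway model, optimizer, and data below are illustrative.

```python
# Sketch only: gradient accumulation with accelerate, every other step.
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
data = DataLoader(TensorDataset(torch.randn(8, 1), torch.randn(8, 1)), batch_size=2)
model, optimizer, data = accelerator.prepare(model, optimizer, data)

# Idiom 1: manual - suppress the gradient all-reduce on accumulation steps.
for step, (inputs, targets) in enumerate(data):
    if step % 2 == 0:
        with accelerator.no_sync(model):  # grads accumulate locally, no sync
            accelerator.backward(F.mse_loss(model(inputs), targets))
    else:
        accelerator.backward(F.mse_loss(model(inputs), targets))
        optimizer.step()
        optimizer.zero_grad()

# Idiom 2: accumulate() tracks the sync boundary internally.
for inputs, targets in data:
    with accelerator.accumulate(model):
        accelerator.backward(F.mse_loss(model(inputs), targets))
        optimizer.step()   # skipped by accelerate on accumulation-only steps
        optimizer.zero_grad()
```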
| 333 | 0 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
a_ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __lowercase ( lowerCamelCase : Optional[Any] ):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : str ):
return max(metric_fn(lowerCamelCase , lowerCamelCase ) for gt in ground_truths )
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : Dict ):
UpperCamelCase_ : Tuple = [line.strip() for line in open(lowerCamelCase , 'r' ).readlines()]
UpperCamelCase_ : List[Any] = []
if args.gold_data_mode == "qa":
UpperCamelCase_ : Union[str, Any] = pd.read_csv(lowerCamelCase , sep='\t' , header=lowerCamelCase )
for answer_list in data[1]:
UpperCamelCase_ : Optional[int] = ast.literal_eval(lowerCamelCase )
answers.append(lowerCamelCase )
else:
UpperCamelCase_ : int = [line.strip() for line in open(lowerCamelCase , 'r' ).readlines()]
UpperCamelCase_ : Optional[int] = [[reference] for reference in references]
UpperCamelCase_ : Optional[int] = 0
for prediction, ground_truths in zip(lowerCamelCase , lowerCamelCase ):
total += 1
em += metric_max_over_ground_truths(lowerCamelCase , lowerCamelCase , lowerCamelCase )
fa += metric_max_over_ground_truths(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCamelCase_ : Union[str, Any] = 1_0_0.0 * em / total
UpperCamelCase_ : List[Any] = 1_0_0.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __lowercase ( lowerCamelCase : Any , lowerCamelCase : int , lowerCamelCase : List[str] ):
UpperCamelCase_ : Optional[int] = args.k
UpperCamelCase_ : List[Any] = [line.strip() for line in open(lowerCamelCase , 'r' ).readlines()]
UpperCamelCase_ : List[str] = [line.strip() for line in open(lowerCamelCase , 'r' ).readlines()]
UpperCamelCase_ : List[str] = 0
for hypo, reference in zip(lowerCamelCase , lowerCamelCase ):
UpperCamelCase_ : List[str] = set(hypo.split('\t' )[:k] )
UpperCamelCase_ : int = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
UpperCamelCase_ : Union[str, Any] = 1_0_0.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __lowercase ( lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : Any ):
def strip_title(lowerCamelCase : List[str] ):
if title.startswith('"' ):
UpperCamelCase_ : List[str] = title[1:]
if title.endswith('"' ):
UpperCamelCase_ : int = title[:-1]
return title
UpperCamelCase_ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCamelCase , return_tensors='pt' , padding=lowerCamelCase , truncation=lowerCamelCase , )['input_ids'].to(args.device )
UpperCamelCase_ : int = rag_model.rag.question_encoder(lowerCamelCase )
UpperCamelCase_ : List[str] = question_enc_outputs[0]
UpperCamelCase_ : Tuple = rag_model.retriever(
lowerCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
UpperCamelCase_ : str = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
UpperCamelCase_ : int = []
for docs in all_docs:
UpperCamelCase_ : Union[str, Any] = [strip_title(lowerCamelCase ) for title in docs['title']]
provenance_strings.append('\t'.join(lowerCamelCase ) )
return provenance_strings
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : List[Any] ):
with torch.no_grad():
UpperCamelCase_ : List[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCamelCase , return_tensors='pt' , padding=lowerCamelCase , truncation=lowerCamelCase )
UpperCamelCase_ : Union[str, Any] = inputs_dict.input_ids.to(args.device )
UpperCamelCase_ : str = inputs_dict.attention_mask.to(args.device )
UpperCamelCase_ : List[Any] = rag_model.generate( # rag_model overwrites generate
lowerCamelCase , attention_mask=lowerCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowerCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
UpperCamelCase_ : str = rag_model.retriever.generator_tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
if args.print_predictions:
for q, a in zip(lowerCamelCase , lowerCamelCase ):
logger.info('Q: {} - A: {}'.format(lowerCamelCase , lowerCamelCase ) )
return answers
def __lowercase ( ):
UpperCamelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=lowerCamelCase , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=lowerCamelCase , choices=['exact', 'compressed', 'legacy'] , type=lowerCamelCase , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=lowerCamelCase , type=lowerCamelCase , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=lowerCamelCase , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=lowerCamelCase , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=lowerCamelCase , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=lowerCamelCase , choices=['qa', 'ans'] , help=(
'Format of the gold data file'
'qa - a single line in the following format: question [tab] answer_list'
'ans - a single line of the gold file contains the expected answer string'
) , )
parser.add_argument(
'--predictions_path' , type=lowerCamelCase , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
        '--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name and ending with step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=lowerCamelCase , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=lowerCamelCase , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=lowerCamelCase , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=50 , type=lowerCamelCase , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
        '--print_docs' , action='store_true' , help='If True, prints docs retrieved while generating.' , )
UpperCamelCase_ : Union[str, Any] = parser.parse_args()
UpperCamelCase_ : Union[str, Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def __lowercase ( lowerCamelCase : int ):
UpperCamelCase_ : Any = {}
if args.model_type is None:
UpperCamelCase_ : List[Any] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
UpperCamelCase_ : Optional[int] = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
UpperCamelCase_ : Dict = args.n_docs
if args.index_name is not None:
UpperCamelCase_ : Union[str, Any] = args.index_name
if args.index_path is not None:
UpperCamelCase_ : str = args.index_path
else:
UpperCamelCase_ : Tuple = BartForConditionalGeneration
UpperCamelCase_ : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , lowerCamelCase )
UpperCamelCase_ : Optional[int] = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
UpperCamelCase_ : Optional[int] = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(lowerCamelCase , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(lowerCamelCase ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
UpperCamelCase_ : List[str] = RagRetriever.from_pretrained(lowerCamelCase , **lowerCamelCase )
UpperCamelCase_ : List[Any] = model_class.from_pretrained(lowerCamelCase , retriever=lowerCamelCase , **lowerCamelCase )
model.retriever.init_retrieval()
else:
UpperCamelCase_ : Optional[Any] = model_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
UpperCamelCase_ : Optional[Any] = []
for line in tqdm(lowerCamelCase ):
questions.append(line.strip() )
if len(lowerCamelCase ) == args.eval_batch_size:
UpperCamelCase_ : Dict = evaluate_batch_fn(lowerCamelCase , lowerCamelCase , lowerCamelCase )
preds_file.write('\n'.join(lowerCamelCase ) + '\n' )
preds_file.flush()
UpperCamelCase_ : Tuple = []
if len(lowerCamelCase ) > 0:
UpperCamelCase_ : Optional[int] = evaluate_batch_fn(lowerCamelCase , lowerCamelCase , lowerCamelCase )
preds_file.write('\n'.join(lowerCamelCase ) )
preds_file.flush()
score_fn(lowerCamelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
a_ = get_args()
main(args)
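The retrieval metric above reduces to a set overlap per line. A self-contained sketch of precision@k on toy tab-separated provenance strings:

```python
# Sketch only: the precision@k computation from the script above.
k = 2
hypos = ["Paris\tLondon", "Tokyo\tKyoto"]
references = ["Paris\tBerlin\tRome", "Osaka\tNagoya"]

em, total = 0.0, 0
for hypo, reference in zip(hypos, references):
    hypo_provenance = set(hypo.split("\t")[:k])   # top-k retrieved titles
    ref_provenance = set(reference.split("\t"))   # gold titles
    total += 1
    em += len(hypo_provenance & ref_provenance) / k
print(f"Precision@{k}: {100.0 * em / total:.2f}")  # 25.00 on this toy data
```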
| 175 |
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def __lowercase ( lowerCamelCase : str , lowerCamelCase : str , **lowerCamelCase : List[Any] ):
UpperCamelCase_ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase , **lowerCamelCase )
UpperCamelCase_ : str = AutoModelForSeqaSeqLM.from_config(lowerCamelCase )
model.save_pretrained(lowerCamelCase )
AutoTokenizer.from_pretrained(lowerCamelCase ).save_pretrained(lowerCamelCase )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
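The snippet above builds a randomly initialized seq2seq model from a pretrained config and saves it alongside the tokenizer. A hedged call sketch; the function name is taken from the `fire.Fire` line (the definition itself was renamed by the dataset transformation), and the model id and output path are illustrative:

```python
# Sketch only: equivalent of `python save_random.py t5-small ./t5-small-random`.
save_randomly_initialized_version("t5-small", "./t5-small-random")
```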
| 175 | 1 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __snake_case (_lowercase ):
lowerCAmelCase__ = ["image_processor", "tokenizer"]
lowerCAmelCase__ = "OwlViTImageProcessor"
lowerCAmelCase__ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Any , _UpperCAmelCase : int=None , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_lowerCAmelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCamelCase , )
_lowerCAmelCase : List[Any] = kwargs.pop("""feature_extractor""" )
_lowerCAmelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCamelCase , __UpperCamelCase )
def __call__( self : List[str] , _UpperCAmelCase : Dict=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Optional[Any]="max_length" , _UpperCAmelCase : List[Any]="np" , **_UpperCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(__UpperCamelCase , __UpperCamelCase ) or (isinstance(__UpperCamelCase , __UpperCamelCase ) and not isinstance(text[0] , __UpperCamelCase )):
_lowerCAmelCase : str = [self.tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )]
elif isinstance(__UpperCamelCase , __UpperCamelCase ) and isinstance(text[0] , __UpperCamelCase ):
_lowerCAmelCase : Union[str, Any] = []
# Maximum number of queries across batch
_lowerCAmelCase : Union[str, Any] = max([len(__UpperCamelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__UpperCamelCase ) != max_num_queries:
_lowerCAmelCase : int = t + [""" """] * (max_num_queries - len(__UpperCamelCase ))
_lowerCAmelCase : List[Any] = self.tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
encodings.append(__UpperCamelCase )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
_lowerCAmelCase : str = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
_lowerCAmelCase : Any = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_lowerCAmelCase : Optional[Any] = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
_lowerCAmelCase : Optional[Any] = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_lowerCAmelCase : Tuple = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
_lowerCAmelCase : Tuple = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_lowerCAmelCase : List[Any] = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
_lowerCAmelCase : int = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
_lowerCAmelCase : Union[str, Any] = BatchEncoding()
_lowerCAmelCase : int = input_ids
_lowerCAmelCase : str = attention_mask
if query_images is not None:
_lowerCAmelCase : List[Any] = BatchEncoding()
_lowerCAmelCase : List[Any] = self.image_processor(
__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase ).pixel_values
_lowerCAmelCase : str = query_pixel_values
if images is not None:
_lowerCAmelCase : Tuple = self.image_processor(__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
if text is not None and images is not None:
_lowerCAmelCase : Union[str, Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_lowerCAmelCase : str = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCamelCase ) , tensor_type=__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( self : str , *_UpperCAmelCase : str , **_UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
return self.image_processor.post_process(*__UpperCamelCase , **__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( self : str , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return self.image_processor.post_process_object_detection(*__UpperCamelCase , **__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Optional[Any] ) -> int:
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*__UpperCamelCase , **__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Any ) -> str:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : List[Any] ) -> List[str]:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __UpperCamelCase , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __UpperCamelCase , )
return self.image_processor
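A short sketch of the query-padding step implemented in `__call__` above: ragged per-image query lists are padded to the longest list before tokenization so the per-sample encodings can be concatenated.

```python
# Sketch only: pure-Python view of the nested-text padding branch above.
text = [["cat", "dog"], ["remote"]]
max_num_queries = max(len(t) for t in text)            # 2
padded = [t + [" "] * (max_num_queries - len(t)) for t in text]
# padded == [["cat", "dog"], ["remote", " "]]; every sample now carries
# the same number of queries.
```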
| 360 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCAmelCase (UpperCamelCase_ : str , UpperCamelCase_ : Tuple=None ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = None
if token is not None:
_lowerCAmelCase : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
_lowerCAmelCase : Tuple = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
_lowerCAmelCase : str = requests.get(UpperCamelCase_ , headers=UpperCamelCase_ ).json()
_lowerCAmelCase : Optional[Any] = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_lowerCAmelCase : List[str] = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(UpperCamelCase_ ):
_lowerCAmelCase : int = requests.get(url + F"&page={i + 2}" , headers=UpperCamelCase_ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def _UpperCAmelCase (UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any]=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = None
if token is not None:
_lowerCAmelCase : Optional[Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
_lowerCAmelCase : Optional[int] = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
_lowerCAmelCase : Optional[int] = requests.get(UpperCamelCase_ , headers=UpperCamelCase_ ).json()
_lowerCAmelCase : List[str] = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_lowerCAmelCase : List[str] = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(UpperCamelCase_ ):
_lowerCAmelCase : List[str] = requests.get(url + F"&page={i + 2}" , headers=UpperCamelCase_ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def _UpperCAmelCase (UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any ):
'''simple docstring'''
_lowerCAmelCase : str = None
if token is not None:
_lowerCAmelCase : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
_lowerCAmelCase : List[str] = requests.get(UpperCamelCase_ , headers=UpperCamelCase_ , allow_redirects=UpperCamelCase_ )
_lowerCAmelCase : List[str] = result.headers["""Location"""]
_lowerCAmelCase : List[Any] = requests.get(UpperCamelCase_ , allow_redirects=UpperCamelCase_ )
_lowerCAmelCase : int = os.path.join(UpperCamelCase_ , F"{artifact_name}.zip" )
with open(UpperCamelCase_ , """wb""" ) as fp:
fp.write(response.content )
def _UpperCAmelCase (UpperCamelCase_ : int , UpperCamelCase_ : Optional[int]=None ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Any = []
_lowerCAmelCase : Union[str, Any] = None
with zipfile.ZipFile(UpperCamelCase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(UpperCamelCase_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(UpperCamelCase_ ) as f:
for line in f:
_lowerCAmelCase : List[str] = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_lowerCAmelCase : Union[str, Any] = line[: line.index(""": """ )]
_lowerCAmelCase : Union[str, Any] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_lowerCAmelCase : Tuple = line[len("""FAILED """ ) :]
failed_tests.append(UpperCamelCase_ )
elif filename == "job_name.txt":
_lowerCAmelCase : str = line
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise ValueError(
F"`errors` and `failed_tests` should have the same number of elements. Got {len(UpperCamelCase_ )} for `errors` "
F"and {len(UpperCamelCase_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
_lowerCAmelCase : int = None
if job_name and job_links:
_lowerCAmelCase : Optional[int] = job_links.get(UpperCamelCase_ , UpperCamelCase_ )
# A list with elements of the form (line of error, error, failed test)
_lowerCAmelCase : Tuple = [x + [y] + [job_link] for x, y in zip(UpperCamelCase_ , UpperCamelCase_ )]
return result
def _UpperCAmelCase (UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any]=None ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : List[Any] = [os.path.join(UpperCamelCase_ , UpperCamelCase_ ) for p in os.listdir(UpperCamelCase_ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(UpperCamelCase_ , job_links=UpperCamelCase_ ) )
return errors
def _UpperCAmelCase (UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict=None ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Counter()
counter.update([x[1] for x in logs] )
_lowerCAmelCase : Dict = counter.most_common()
_lowerCAmelCase : Dict = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_lowerCAmelCase : Union[str, Any] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
_lowerCAmelCase : int = dict(sorted(r.items() , key=lambda UpperCamelCase_ : item[1]["count"] , reverse=UpperCamelCase_ ) )
return r
def _UpperCAmelCase (UpperCamelCase_ : Tuple ):
'''simple docstring'''
_lowerCAmelCase : List[str] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_lowerCAmelCase : Optional[Any] = test.split("""/""" )[2]
else:
_lowerCAmelCase : Union[str, Any] = None
return test
def _UpperCAmelCase (UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict=None ):
'''simple docstring'''
_lowerCAmelCase : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
_lowerCAmelCase : List[str] = [x for x in logs if x[2] is not None]
_lowerCAmelCase : int = {x[2] for x in logs}
_lowerCAmelCase : str = {}
for test in tests:
_lowerCAmelCase : Union[str, Any] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_lowerCAmelCase : List[Any] = counter.most_common()
_lowerCAmelCase : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_lowerCAmelCase : List[str] = sum(error_counts.values() )
if n_errors > 0:
_lowerCAmelCase : int = {"""count""": n_errors, """errors""": error_counts}
_lowerCAmelCase : Dict = dict(sorted(r.items() , key=lambda UpperCamelCase_ : item[1]["count"] , reverse=UpperCamelCase_ ) )
return r
def _UpperCAmelCase (UpperCamelCase_ : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = """| no. | error | status |"""
_lowerCAmelCase : List[Any] = """|-:|:-|:-|"""
_lowerCAmelCase : str = [header, sep]
for error in reduced_by_error:
_lowerCAmelCase : Optional[Any] = reduced_by_error[error]["""count"""]
_lowerCAmelCase : int = F"| {count} | {error[:100]} | |"
lines.append(UpperCamelCase_ )
return "\n".join(UpperCamelCase_ )
def _UpperCAmelCase (UpperCamelCase_ : List[str] ):
'''simple docstring'''
_lowerCAmelCase : str = """| model | no. of errors | major error | count |"""
_lowerCAmelCase : Any = """|-:|-:|-:|-:|"""
_lowerCAmelCase : str = [header, sep]
for model in reduced_by_model:
_lowerCAmelCase : Union[str, Any] = reduced_by_model[model]["""count"""]
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = list(reduced_by_model[model]["""errors"""].items() )[0]
_lowerCAmelCase : str = F"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(UpperCamelCase_ )
return "\n".join(UpperCamelCase_ )
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_lowerCamelCase : Tuple = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_lowerCamelCase : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
_lowerCamelCase : int = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_lowerCamelCase : Optional[Any] = k.find(" / ")
_lowerCamelCase : Tuple = k[index + len(" / ") :]
_lowerCamelCase : List[Any] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_lowerCamelCase : Union[str, Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_lowerCamelCase : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_lowerCamelCase : Dict = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_lowerCamelCase : Union[str, Any] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_lowerCamelCase : str = reduce_by_error(errors)
_lowerCamelCase : Tuple = reduce_by_model(errors)
_lowerCamelCase : List[str] = make_github_table(reduced_by_error)
_lowerCamelCase : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
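A compact sketch of the aggregation the script performs: identical error messages are counted across all failing jobs and the failing tests are kept per error. The toy tuples stand in for parsed artifact lines.

```python
# Sketch only: the reduce-by-error aggregation used above.
from collections import Counter

logs = [
    ("t1.py:3", "AssertionError", "tests/test_a.py::test_x"),
    ("t2.py:9", "AssertionError", "tests/test_b.py::test_y"),
    ("t3.py:1", "ImportError", "tests/test_c.py::test_z"),
]
counter = Counter(x[1] for x in logs)
reduced = {
    error: {
        "count": count,
        "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error],
    }
    for error, count in counter.most_common()
}
assert reduced["AssertionError"]["count"] == 2
```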
| 159 | 0 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowercase : Optional[Any] = logging.get_logger(__name__)
class __snake_case ( lowerCAmelCase ):
_a : List[str]= CLIPConfig
_a : Tuple= ["CLIPEncoderLayer"]
def __init__( self ,snake_case ):
'''simple docstring'''
super().__init__(snake_case )
lowercase : Union[str, Any] = CLIPVisionModelWithProjection(config.vision_config )
lowercase : Tuple = nn.Linear(config.vision_config.projection_dim ,1 )
lowercase : Optional[Any] = nn.Linear(config.vision_config.projection_dim ,1 )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case=0.5 ,snake_case=0.5 ):
'''simple docstring'''
lowercase : str = self.vision_model(snake_case )[0]
lowercase : List[str] = self.p_head(snake_case )
lowercase : List[Any] = nsfw_detected.flatten()
lowercase : Union[str, Any] = nsfw_detected > p_threshold
lowercase : Dict = nsfw_detected.tolist()
if any(snake_case ):
logger.warning(
"""Potential NSFW content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""" )
for idx, nsfw_detected_ in enumerate(snake_case ):
if nsfw_detected_:
lowercase : Any = np.zeros(images[idx].shape )
lowercase : int = self.w_head(snake_case )
lowercase : List[str] = watermark_detected.flatten()
lowercase : List[Any] = watermark_detected > w_threshold
lowercase : List[Any] = watermark_detected.tolist()
if any(snake_case ):
logger.warning(
"""Potential watermarked content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""" )
for idx, watermark_detected_ in enumerate(snake_case ):
if watermark_detected_:
lowercase : List[Any] = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
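A hedged sketch of the detect-and-blank pattern used above: project pooled CLIP embeddings to a scalar with a linear head, threshold, and replace flagged images with black ones. Random tensors stand in for real embeddings and images.

```python
# Sketch only: the thresholding pattern from the safety checker above.
import numpy as np
import torch
import torch.nn as nn

head = nn.Linear(768, 1)                      # stands in for the p_head/w_head
embeds = torch.randn(2, 768)                  # pooled image embeddings (toy)
images = [np.ones((64, 64, 3)) for _ in range(2)]

flagged = (head(embeds).flatten() > 0.5).tolist()
for idx, is_flagged in enumerate(flagged):
    if is_flagged:
        images[idx] = np.zeros(images[idx].shape)  # return a black image
```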
| 20 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
""",
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> None:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )
    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor) -> None:
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)
    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are not enough options in target_ids
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it")
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.")
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`.")
        return {}, {}, postprocess_params
    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
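For reference, this pipeline is normally reached through the high-level `pipeline` factory; a short sketch (the checkpoint name is just an example):

```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="bert-base-uncased")  # example checkpoint
print(fill_mask("Paris is the [MASK] of France.", top_k=2))
# Restrict scoring to chosen candidate tokens via `targets`:
print(fill_mask("Paris is the [MASK] of France.", targets=["capital", "heart"]))
```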
| 20 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next: Node | None = None
class CircularLinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None
    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
    def __len__(self) -> int:
        return sum(1 for _ in self)
    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))
    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points to itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node
    def delete_front(self) -> Any:
        return self.delete_nth(0)
    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data
    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
    import doctest
    doctest.testmod()
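A quick sketch of the structure's central invariant, assuming the class as defined above: the tail's `next` pointer always wraps back to the head.

```python
cll = CircularLinkedList()
for value in (1, 2, 3):
    cll.insert_tail(value)

print(cll)                        # 1->2->3
assert cll.tail.next is cll.head  # circularity invariant
```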
| 369 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any]=13 , lowerCAmelCase__ : Union[str, Any]=7 , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : int=99 , lowerCAmelCase__ : str=32 , lowerCAmelCase__ : str=5 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : str=37 , lowerCAmelCase__ : int="gelu" , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : Optional[int]=512 , lowerCAmelCase__ : Dict=16 , lowerCAmelCase__ : List[Any]=2 , lowerCAmelCase__ : Any=0.02 , lowerCAmelCase__ : Union[str, Any]=4 , ) -> Dict:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_attention_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_choices
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_attention_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def snake_case__ ( self : List[str] ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = True
_UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self) -> None:
        self.model_tester = FlaxRobertaModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
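The slow test above boils down to the following pattern, loading PyTorch weights into a Flax model class (this downloads the `roberta-base` checkpoint and requires `flax` to be installed):

```python
import numpy as np
from transformers import FlaxRobertaModel

model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
outputs = model(np.ones((1, 1), dtype="i4"))
print(outputs.last_hidden_state.shape)  # (1, 1, 768)
```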
| 287 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
def lowerCamelCase ( self : List[Any] ):
snake_case__ : str = tempfile.mkdtemp()
snake_case__ : Tuple = 5
# Realm tok
snake_case__ : Union[str, Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""test""",
"""question""",
"""this""",
"""is""",
"""the""",
"""first""",
"""second""",
"""third""",
"""fourth""",
"""fifth""",
"""record""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
snake_case__ : Optional[int] = os.path.join(self.tmpdirname , """realm_tokenizer""" )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
snake_case__ : Union[str, Any] = os.path.join(snake_case_ , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
snake_case__ : List[Any] = os.path.join(self.tmpdirname , """realm_block_records""" )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
def lowerCamelCase ( self : List[str] ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) )
def lowerCamelCase ( self : str ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase ( self : int ):
snake_case__ : List[Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def lowerCamelCase ( self : int ):
snake_case__ : str = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""question""": ["""foo""", """bar"""],
"""answers""": [["""Foo""", """Bar"""], ["""Bar"""]],
} )
return dataset
def lowerCamelCase ( self : List[str] ):
snake_case__ : str = np.array(
[
b"""This is the first record""",
b"""This is the second record""",
b"""This is the third record""",
b"""This is the fourth record""",
b"""This is the fifth record""",
b"""This is a longer longer longer record""",
] , dtype=snake_case_ , )
return block_records
def lowerCamelCase ( self : int ):
snake_case__ : Dict = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def lowerCamelCase ( self : int ):
snake_case__ : Tuple = self.get_config()
snake_case__ : Any = self.get_dummy_retriever()
snake_case__ : str = retriever.tokenizer
snake_case__ : Optional[int] = np.array([0, 3] , dtype="""long""" )
snake_case__ : List[Any] = tokenizer(["""Test question"""] ).input_ids
snake_case__ : List[str] = tokenizer(
["""the fourth"""] , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , ).input_ids
snake_case__ : List[Any] = config.reader_seq_len
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = retriever(
snake_case_ , snake_case_ , answer_ids=snake_case_ , max_length=snake_case_ , return_tensors="""np""" )
self.assertEqual(len(snake_case_ ) , 2 )
self.assertEqual(len(snake_case_ ) , 2 )
self.assertEqual(len(snake_case_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , )
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : List[str] = self.get_config()
snake_case__ : List[str] = self.get_dummy_retriever()
snake_case__ : str = retriever.tokenizer
snake_case__ : List[Any] = np.array([0, 3, 5] , dtype="""long""" )
snake_case__ : Union[str, Any] = tokenizer(["""Test question"""] ).input_ids
snake_case__ : Optional[int] = tokenizer(
["""the fourth""", """longer longer"""] , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , ).input_ids
snake_case__ : Any = config.reader_seq_len
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = retriever(
snake_case_ , snake_case_ , answer_ids=snake_case_ , max_length=snake_case_ , return_tensors="""np""" )
self.assertEqual([False, True, True] , snake_case_ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , snake_case_ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , snake_case_ )
def lowerCamelCase ( self : Any ):
snake_case__ : List[str] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )
# Test local path
snake_case__ : Union[str, Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )
self.assertEqual(retriever.block_records[0] , b"""This is the first record""" )
# Test mocked remote path
with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download:
snake_case__ : Tuple = os.path.join(
os.path.join(self.tmpdirname , """realm_block_records""" ) , _REALM_BLOCK_RECORDS_FILENAME )
snake_case__ : List[str] = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" )
self.assertEqual(retriever.block_records[0] , b"""This is the first record""" )
| 35 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = '<pad>'
__lowerCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_11_22 )
def __lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowerCAmelCase : Optional[Any] = [0, 57, 30_18, 7_03_07, 91, 2]
__lowerCAmelCase : Optional[int] = self.tokenizer(
_SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__lowerCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[str] = 'I was born in 92000, and this is falsé.'
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# fmt: off
__lowerCAmelCase : str = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__lowerCAmelCase : Union[str, Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , ) | 86 | 0 |
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Repeatedly pull an increasing "strand" out of ``arr`` and merge it into ``solution``."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 361 |
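A short trace of the `strand_sort` implementation above: each recursive pass peels one non-decreasing "strand" off the input and merges it into the running solution.

```python
# Pass 1 over [4, 3, 5, 1, 2]: strand [4, 5] is extracted, leaving [3, 1, 2]
# Pass 2: strand [3] merges into [4, 5] -> [3, 4, 5]
# Pass 3: strand [1, 2] merges -> [1, 2, 3, 4, 5]
print(strand_sort([4, 3, 5, 1, 2]))  # [1, 2, 3, 4, 5]
```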
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result
def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()
def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
| 307 | 0 |
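Calling the generator above directly makes the recursion's output concrete: all k-subsets of {1, ..., n} in lexicographic order.

```python
print(generate_all_combinations(n=4, k=2))
# [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
```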
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")
DUMMY_CONSTANT = """
{0} = None
"""
DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""
DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)
        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")
        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")
        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):")
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):")
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 31 |
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]
def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])
def solution(limit: int = 1_0000) -> int:
    """Count candidate Lychrel numbers below ``limit`` (at most 50 reverse-and-add steps)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a_number = num
        while iterations < 50:
            a_number = sum_reverse(a_number)
            iterations += 1
            if is_palindrome(a_number):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(F'''{solution() = }''')
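A worked example of the reverse-and-add iteration the solver runs; 349 reaches a palindrome in three steps, while 196 is the classic candidate that never seems to:

```python
print(sum_reverse(349))      # 349 + 943 = 1292
print(sum_reverse(1292))     # 1292 + 2921 = 4213
print(sum_reverse(4213))     # 4213 + 3124 = 7337
print(is_palindrome(7337))   # True -> 349 is not a Lychrel number
```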
| 201 | 0 |
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
a_ : Optional[int] = "Usage of script: script_name <size_of_canvas:int>"
a_ : Optional[Any] = [0] * 1_0_0 + [1] * 1_0
random.shuffle(choice)
def _A (lowerCAmelCase__ :int ) -> list[list[bool]]:
'''simple docstring'''
_a = [[False for i in range(lowerCAmelCase__ )] for j in range(lowerCAmelCase__ )]
return canvas
def _A (lowerCAmelCase__ :list[list[bool]] ) -> None:
'''simple docstring'''
for i, row in enumerate(lowerCAmelCase__ ):
for j, _ in enumerate(lowerCAmelCase__ ):
_a = bool(random.getrandbits(1 ) )
def _A (lowerCAmelCase__ :list[list[bool]] ) -> list[list[bool]]:
'''simple docstring'''
_a = np.array(lowerCAmelCase__ )
_a = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(lowerCAmelCase__ ):
for c, pt in enumerate(lowerCAmelCase__ ):
_a = __judge_point(
lowerCAmelCase__ , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
_a = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
_a = current_canvas.tolist()
return return_canvas
def _A (lowerCAmelCase__ :bool , lowerCAmelCase__ :list[list[bool]] ) -> bool:
'''simple docstring'''
_a = 0
_a = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
_a = pt
if pt:
if alive < 2:
_a = False
elif alive == 2 or alive == 3:
_a = True
elif alive > 3:
_a = False
else:
if alive == 3:
_a = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
a_ : str = int(sys.argv[1])
# main working structure of this module.
a_ : Optional[Any] = create_canvas(canvas_size)
seed(c)
a_ : Any = plt.subplots()
fig.show()
a_ : Union[str, Any] = ListedColormap(["w", "k"])
try:
while True:
a_ : Any = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 355 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=1_60_00,
        hop_length=1_60,
        chunk_length=30,
        n_fft=4_00,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8_0_0_0.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        # Log-mel spectrogram, clipped and rescaled the way Whisper expects.
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(
        self,
        raw_speech,
        truncation=True,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_attention_mask=None,
        padding="max_length",
        max_length=None,
        sampling_rate=None,
        do_normalize=None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    f' was sampled with {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({'input_features': raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs['input_features'] = self.zero_mean_unit_var_norm(
                padded_inputs['input_features'],
                attention_mask=padded_inputs['attention_mask'],
                padding_value=self.padding_value,
            )
            padded_inputs['input_features'] = np.stack(padded_inputs['input_features'], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get('input_features').transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs['input_features'] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs['attention_mask'] = padded_inputs['attention_mask'][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['feature_extractor_type'] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
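A minimal usage sketch for the extractor above on one second of synthetic audio; with the defaults, output is padded/truncated to 30 s, i.e. 80 mel bins by 3000 frames (the class is importable as `transformers.WhisperFeatureExtractor`):

```python
import numpy as np

feature_extractor = WhisperFeatureExtractor()
audio = np.random.randn(16_000).astype(np.float32)  # 1 s of noise at 16 kHz
inputs = feature_extractor(audio, sampling_rate=16_000, return_tensors="np")
print(inputs.input_features.shape)  # (1, 80, 3000)
```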
| 104 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=7 , UpperCAmelCase=3 , UpperCAmelCase=18 , UpperCAmelCase=30 , UpperCAmelCase=400 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=[0.5, 0.5, 0.5] , UpperCAmelCase=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20}
_UpperCAmelCase = do_thumbnail
_UpperCAmelCase = do_align_axis
_UpperCAmelCase = do_pad
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
def UpperCamelCase ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_thumbnail' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_align_long_axis' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_pad' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'image_std' ) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@is_flaky()
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 39 |
'''simple docstring'''
import os
def solution() -> str:
    file_path = os.path.join(os.path.dirname(__file__), '''num.txt''')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:1_0]
if __name__ == "__main__":
print(solution())
| 34 | 0 |
"""simple docstring"""
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701
a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
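The two prints rely on Fermat's little theorem: for prime p and b not divisible by p, b^(p-2) ≡ b^(-1) (mod p), so dividing by b modulo p is the same as multiplying by `binary_exponentiation(b, p - 2, p)`:

```python
inv_b = binary_exponentiation(b, p - 2, p)
assert (b * inv_b) % p == 1  # inv_b is the modular inverse of b mod p
```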
| 56 |
"""simple docstring"""
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
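A few concrete calls, using 0b1101 (decimal 13) as the working value:

```python
print(set_bit(0b1101, 1))     # 15 (0b1111)
print(clear_bit(0b1101, 2))   # 9  (0b1001)
print(flip_bit(0b1101, 0))    # 12 (0b1100)
print(is_bit_set(0b1101, 3))  # True
print(get_bit(0b1101, 1))     # 0
```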
| 56 | 1 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [1_0, 2_0, 3_0, 4_0, 5_0, 6_0]
        weight = [2, 4, 6, 8, 1_0, 1_2]
        max_weight = 1_0_0
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 2_1_0)
    # NB: without a callable argument the following assertRaisesRegex calls only
    # build an (unused) context manager; kept as in the source tests.
    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, '''max_weight must greater than zero.''')
    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, '''Weight can not be negative.''')
    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, '''Profit can not be negative.''')
    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, '''max_weight must greater than zero.''')
    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, '''The length of profit and weight must be same.''')
if __name__ == "__main__":
unittest.main()
| 104 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(F'''{solution() = }''')
| 234 | 0 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('KEY')
VAL = TypeVar('VAL')
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL
class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)
    def __bool__(self) -> bool:
        return False
_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)
    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)
    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False
    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)
    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit
    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)
    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)
    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)
    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)
    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break
    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)
    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)
    def __len__(self) -> int:
        return self._len
    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)
    def __repr__(self) -> str:
        val_string = ''' ,'''.join(
            f'''{item.key}: {item.val}''' for item in self._buckets if item)
        return f'''HashMap({val_string})'''
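The map uses open addressing with linear probing; deleted slots are replaced by a falsy tombstone so probe chains stay intact (note the `slots=True` dataclass option needs Python 3.10+). A short usage sketch:

```python
hm = HashMap(initial_block_size=8)
hm["apple"] = 1
hm["banana"] = 2
hm["apple"] = 3              # overwrite in place

print(hm["apple"], len(hm))  # 3 2
del hm["banana"]             # leaves a tombstone, not an empty slot
print("banana" in hm)        # False
print(hm)                    # HashMap(apple: 3)
```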
| 341 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__(
        self,
        vocab_size=3_20_00,
        hidden_size=40_96,
        intermediate_size=1_10_08,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=20_48,
        initializer_range=0.0_2,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("""type""", None)
        rope_scaling_factor = self.rope_scaling.get("""factor""", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
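A small illustrative configuration exercising the RoPE-scaling validation above; the values are arbitrary, not a released model's:

```python
config = LlamaConfig(
    vocab_size=32_000,
    hidden_size=1_024,
    num_hidden_layers=4,
    num_attention_heads=8,
    rope_scaling={"type": "linear", "factor": 2.0},  # must pass the validator
)
print(config.num_key_value_heads)  # defaults to num_attention_heads -> 8
```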
| 341 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = 'vit_msn'
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 45 |
"""simple docstring"""
import math
def jump_search(arr: list, x: int) -> int:
    """Search sorted ``arr`` for ``x`` in O(sqrt(n)) block jumps; return index or -1."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(F'''Number {x} is at index {res}''')
| 45 | 1 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def lowerCamelCase ( UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> float:
lowercase_ : List[Any] = x
lowercase_ : Any = y
for step in range(UpperCAmelCase__ ): # noqa: B007
lowercase_ : Dict = a * a - b * b + x
lowercase_ : str = 2 * a * b + y
lowercase_ : Optional[Any] = a_new
        # divergence is guaranteed once the squared magnitude a * a + b * b
        # exceeds 4, i.e. once |z| > 2
if a * a + b * b > 4:
break
return step / (max_step - 1)
def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(UpperCAmelCase__ , 1 , 1 ) )
def lowerCamelCase ( UpperCAmelCase__ : int = 800 , UpperCAmelCase__ : int = 600 , UpperCAmelCase__ : float = -0.6 , UpperCAmelCase__ : float = 0 , UpperCAmelCase__ : float = 3.2 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : bool = True , ) -> Image.Image:
lowercase_ : Union[str, Any] = Image.new("""RGB""" , (image_width, image_height) )
lowercase_ : Tuple = img.load()
# loop through the image-coordinates
for image_x in range(UpperCAmelCase__ ):
for image_y in range(UpperCAmelCase__ ):
# determine the figure-coordinates based on the image-coordinates
lowercase_ : Any = figure_width / image_width * image_height
lowercase_ : Tuple = figure_center_x + (image_x / image_width - 0.5) * figure_width
lowercase_ : Union[str, Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height
lowercase_ : str = get_distance(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowercase_ : List[Any] = get_color_coded_rgb(UpperCAmelCase__ )
else:
lowercase_ : Dict = get_black_and_white_rgb(UpperCAmelCase__ )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_lowercase : List[str] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 21 |
'''simple docstring'''
import os
import numpy
import onnx
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str ) -> Tuple:
lowercase_ : Tuple = a.name
lowercase_ : Tuple = b.name
lowercase_ : Any = """"""
lowercase_ : List[Any] = """"""
lowercase_ : List[Any] = a == b
lowercase_ : Union[str, Any] = name_a
lowercase_ : Optional[Any] = name_b
return res
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase__ , UpperCAmelCase__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase__ , UpperCAmelCase__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> int:
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ) -> List[str]:
lowercase_ : int = list(model.graph.initializer )
lowercase_ : List[str] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowercase_ : Optional[Any] = inits[i].name
lowercase_ : List[str] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : int ) -> List[str]:
lowercase_ : Dict = os.path.dirname(UpperCAmelCase__ )
lowercase_ : Optional[Any] = os.path.basename(UpperCAmelCase__ )
lowercase_ : str = onnx.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowercase_ : List[Any] = list(model.graph.initializer )
lowercase_ : int = set()
lowercase_ : int = {}
lowercase_ : str = []
lowercase_ : int = 0
for i in range(len(UpperCAmelCase__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase__ )
dup_set.add(UpperCAmelCase__ )
lowercase_ : Dict = inits[j].data_type
lowercase_ : List[str] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("""unexpected data type: """ , UpperCAmelCase__ )
total_reduced_size += mem_size
lowercase_ : int = inits[i].name
lowercase_ : List[str] = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase__ )
else:
lowercase_ : Optional[int] = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" )
lowercase_ : Tuple = sorted(UpperCAmelCase__ )
_remove_dup_initializers_from_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = """optimized_""" + model_file_name
lowercase_ : Optional[int] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
onnx.save(UpperCAmelCase__ , UpperCAmelCase__ )
return new_model
| 21 | 1 |
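# The Mandelbrot snippet above iterates z <- z^2 + c with masked temporaries;
# here is the same escape-time kernel with readable names (function name and
# demo values are illustrative). It returns a value in [0, 1], where 1.0 means
# c = x + iy stayed bounded for all max_step iterations (max_step must be >= 2):
def escape_time(x: float, y: float, max_step: int) -> float:
    a, b = x, y
    for step in range(max_step):
        a_new = a * a - b * b + x  # real part of z^2 + c
        b = 2 * a * b + y          # imaginary part of z^2 + c
        a = a_new
        # |z|^2 = a^2 + b^2 > 4 means |z| > 2, which guarantees divergence
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)

assert escape_time(0.0, 0.0, 50) == 1.0  # the origin is in the Mandelbrot set
assert escape_time(2.0, 2.0, 50) == 0.0  # diverges on the very first iteration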
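# The second snippet in the row above deduplicates identical ONNX initializers
# to shrink a model. A compact sketch of the detection step, comparing tensor
# contents via onnx.numpy_helper (the function name is illustrative; rewiring
# node inputs and deleting the duplicates, as the snippet does, is left out):
import onnx
from onnx import numpy_helper

def find_duplicate_initializers(model: onnx.ModelProto) -> list:
    """Return (duplicate_name, kept_name) pairs for initializers with equal contents."""
    pairs, seen = [], {}
    for tensor in model.graph.initializer:
        arr = numpy_helper.to_array(tensor)
        key = (str(arr.dtype), arr.shape, arr.tobytes())  # value-based identity
        if key in seen:
            pairs.append((tensor.name, seen[key]))
        else:
            seen[key] = tensor.name
    return pairs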
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase , unittest.TestCase ):
__lowerCAmelCase = BertTokenizer
__lowerCAmelCase = BertTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = filter_non_english
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
super().setUp()
UpperCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple ):
"""simple docstring"""
UpperCamelCase = '''UNwant\u00E9d,running'''
UpperCamelCase = '''unwanted, running'''
return input_text, output_text
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.tokenizer_class(self.vocab_file )
UpperCamelCase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(A_ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [9, 6, 7, 12, 10, 11] )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = '''UNwant\u00E9d,running'''
UpperCamelCase = tokenizer.tokenize(A_ )
UpperCamelCase = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ )
UpperCamelCase = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = tokenizer.encode(A_ )
UpperCamelCase = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
# With lower casing
UpperCamelCase = self.get_tokenizer(do_lower_case=A_ )
UpperCamelCase = self.get_rust_tokenizer(do_lower_case=A_ )
UpperCamelCase = '''UNwant\u00E9d,running'''
UpperCamelCase = tokenizer.tokenize(A_ )
UpperCamelCase = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ )
UpperCamelCase = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = tokenizer.encode(A_ )
UpperCamelCase = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = BasicTokenizer(do_lower_case=A_ , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = BasicTokenizer()
UpperCamelCase = '''a\n\'ll !!to?\'d of, can\'t.'''
UpperCamelCase = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(A_ ) , A_ )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
UpperCamelCase = {}
for i, token in enumerate(A_ ):
UpperCamelCase = i
UpperCamelCase = WordpieceTokenizer(vocab=A_ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = self.tokenizer_class.from_pretrained("""bert-base-uncased""" )
UpperCamelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=A_ )
UpperCamelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A_ )
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ )
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
UpperCamelCase = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
UpperCamelCase = tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
UpperCamelCase = tokenizer_r.do_lower_case if hasattr(A_ , """do_lower_case""" ) else False
UpperCamelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = ['''的''', '''人''', '''有''']
UpperCamelCase = ''''''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCamelCase = True
UpperCamelCase = self.tokenizer_class.from_pretrained(A_ , **A_ )
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
UpperCamelCase = tokenizer_p.encode(A_ , add_special_tokens=A_ )
UpperCamelCase = tokenizer_r.encode(A_ , add_special_tokens=A_ )
UpperCamelCase = tokenizer_r.convert_ids_to_tokens(A_ )
UpperCamelCase = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = False
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
UpperCamelCase = self.tokenizer_class.from_pretrained(A_ , **A_ )
UpperCamelCase = tokenizer_r.encode(A_ , add_special_tokens=A_ )
UpperCamelCase = tokenizer_p.encode(A_ , add_special_tokens=A_ )
UpperCamelCase = tokenizer_r.convert_ids_to_tokens(A_ )
UpperCamelCase = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCamelCase = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
| 343 |
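# The test class above exercises BERT's WordpieceTokenizer; the behavior under
# test is greedy longest-match-first subword splitting. A self-contained sketch
# of that loop (vocabulary and function name are illustrative):
def wordpiece_tokenize(word: str, vocab: set, unk: str = "[UNK]") -> list:
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:  # shrink the candidate until it is in the vocab
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # non-initial pieces carry the '##' prefix
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # any unmatchable span makes the whole word unknown
        pieces.append(cur)
        start = end
    return pieces

vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert wordpiece_tokenize("unwanted", vocab) == ["un", "##want", "##ed"]
assert wordpiece_tokenize("unwantedX", vocab) == ["[UNK]"]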
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_UpperCamelCase = logging.get_logger(__name__)
def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
def constraint_to_multiple_of(lowercase__ , lowercase__ , lowercase__=0 , lowercase__=None ):
__lowerCAmelCase : int = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
__lowerCAmelCase : Optional[int] = math.floor(val / multiple ) * multiple
if x < min_val:
__lowerCAmelCase : Any = math.ceil(val / multiple ) * multiple
return x
__lowerCAmelCase : Dict = (output_size, output_size) if isinstance(lowercase__ , lowercase__ ) else output_size
__lowerCAmelCase, __lowerCAmelCase : Optional[Any] = get_image_size(lowercase__ )
__lowerCAmelCase, __lowerCAmelCase : int = output_size
# determine new height and width
__lowerCAmelCase : Optional[Any] = output_height / input_height
__lowerCAmelCase : List[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
__lowerCAmelCase : str = scale_width
else:
# fit height
__lowerCAmelCase : str = scale_height
__lowerCAmelCase : Any = constraint_to_multiple_of(scale_height * input_height , multiple=lowercase__ )
__lowerCAmelCase : Union[str, Any] = constraint_to_multiple_of(scale_width * input_width , multiple=lowercase__ )
return (new_height, new_width)
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = ["""pixel_values"""]
def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = False , A_ = 1 , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , **A_ , ) ->None:
'''simple docstring'''
super().__init__(**A_ )
__lowerCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 384, '''width''': 384}
__lowerCAmelCase : Dict = get_size_dict(A_ )
__lowerCAmelCase : Optional[Any] = do_resize
__lowerCAmelCase : int = size
__lowerCAmelCase : Dict = keep_aspect_ratio
__lowerCAmelCase : List[Any] = ensure_multiple_of
__lowerCAmelCase : Tuple = resample
__lowerCAmelCase : Dict = do_rescale
__lowerCAmelCase : Any = rescale_factor
__lowerCAmelCase : List[Any] = do_normalize
__lowerCAmelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCAmelCase : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , A_ , A_ , A_ = False , A_ = 1 , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) ->np.ndarray:
'''simple docstring'''
__lowerCAmelCase : int = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__lowerCAmelCase : Union[str, Any] = get_resize_output_image_size(
A_ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=A_ , multiple=A_ , )
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def UpperCamelCase__ ( self , A_ , A_ , A_ = None , **A_ , ) ->Dict:
'''simple docstring'''
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ = None , **A_ , ) ->np.ndarray:
'''simple docstring'''
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def UpperCamelCase__ ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) ->PIL.Image.Image:
'''simple docstring'''
__lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase : Optional[int] = size if size is not None else self.size
__lowerCAmelCase : Union[str, Any] = get_size_dict(A_ )
__lowerCAmelCase : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
__lowerCAmelCase : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
__lowerCAmelCase : Tuple = resample if resample is not None else self.resample
__lowerCAmelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase : str = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase : Optional[Any] = image_std if image_std is not None else self.image_std
__lowerCAmelCase : Optional[Any] = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowerCAmelCase : Any = [to_numpy_array(A_ ) for image in images]
if do_resize:
__lowerCAmelCase : Optional[Any] = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_rescale:
__lowerCAmelCase : Tuple = [self.rescale(image=A_ , scale=A_ ) for image in images]
if do_normalize:
__lowerCAmelCase : str = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
__lowerCAmelCase : Union[str, Any] = [to_channel_dimension_format(A_ , A_ ) for image in images]
__lowerCAmelCase : Dict = {'''pixel_values''': images}
return BatchFeature(data=A_ , tensor_type=A_ )
def UpperCamelCase__ ( self , A_ , A_ = None ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(A_ ):
__lowerCAmelCase : Optional[int] = target_sizes.numpy()
__lowerCAmelCase : List[str] = []
for idx in range(len(A_ ) ):
__lowerCAmelCase : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=A_ )
__lowerCAmelCase : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
__lowerCAmelCase : Any = logits.argmax(dim=1 )
__lowerCAmelCase : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 275 | 0 |
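# The image processor above snaps each resized side to a multiple of a given
# value while respecting optional bounds. A sketch of that rounding rule with
# unmasked names (the demo values are illustrative):
import math
from typing import Optional

def constrain_to_multiple_of(val: float, multiple: int, min_val: int = 0, max_val: Optional[int] = None) -> int:
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple  # round down rather than exceed the ceiling
    if x < min_val:
        x = math.ceil(val / multiple) * multiple  # round up rather than undershoot the floor
    return x

assert constrain_to_multiple_of(383, 32) == 384
assert constrain_to_multiple_of(383, 32, max_val=383) == 352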
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, A, A=3, A=32, A=3, A=10, A=[10, 20, 30, 40], A=[1, 1, 2, 1], A=True, A=True, A="relu", A=3, A=None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : str = embeddings_size
SCREAMING_SNAKE_CASE : str = hidden_sizes
SCREAMING_SNAKE_CASE : Dict = depths
SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE : List[str] = scope
SCREAMING_SNAKE_CASE : Tuple = len(A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, pixel_values
def UpperCamelCase_ ( self ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = FlaxRegNetModel(config=A )
SCREAMING_SNAKE_CASE : Optional[Any] = model(A )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = FlaxRegNetForImageClassification(config=A )
SCREAMING_SNAKE_CASE : str = model(A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Optional[Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
A : Dict = False
A : int = False
A : List[Any] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = FlaxRegNetModelTester(self )
SCREAMING_SNAKE_CASE : str = ConfigTester(self, config_class=A, has_text_modality=A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self ):
'''simple docstring'''
return
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = model_class(A )
SCREAMING_SNAKE_CASE : Optional[Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['pixel_values']
self.assertListEqual(arg_names[:1], A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(A, A, A ):
SCREAMING_SNAKE_CASE : Optional[int] = model_class(A )
SCREAMING_SNAKE_CASE : Dict = model(**self._prepare_for_class(A, A ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.num_stages
self.assertEqual(len(A ), expected_num_stages + 1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(A, A, A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Any = True
check_hidden_states_output(A, A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(A, A )
SCREAMING_SNAKE_CASE : str = model_class(A )
@jax.jit
def model_jitted(A, **A ):
return model(pixel_values=A, **A )
with self.subTest('JIT Enabled' ):
SCREAMING_SNAKE_CASE : str = model_jitted(**A ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE : Union[str, Any] = model_jitted(**A ).to_tuple()
self.assertEqual(len(A ), len(A ) )
for jitted_output, output in zip(A, A ):
self.assertEqual(jitted_output.shape, output.shape )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
SCREAMING_SNAKE_CASE : int = self.default_image_processor
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : str = image_processor(images=A, return_tensors='np' )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**A )
# verify the logits
SCREAMING_SNAKE_CASE : int = (1, 1_000)
self.assertEqual(outputs.logits.shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3], A, atol=1E-4 ) )
| 246 |
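# The Flax test above checks that a jitted forward pass matches the eager one.
# The pattern in isolation, on a toy function instead of the model (shapes and
# tolerance are illustrative):
import jax
import jax.numpy as jnp

def forward(x):
    return jnp.tanh(x @ x.T)

jitted = jax.jit(forward)
x = jnp.ones((4, 8))

out_jit = jitted(x)
with jax.disable_jit():  # run the same callable eagerly
    out_eager = jitted(x)

assert out_jit.shape == out_eager.shape
assert jnp.allclose(out_jit, out_eager, atol=1e-6)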
'''simple docstring'''
def lowercase__( __UpperCamelCase: list[list[float]] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : list[list[float]] = []
for data in source_data:
for i, el in enumerate(__UpperCamelCase ):
if len(__UpperCamelCase ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(__UpperCamelCase ) )
return data_lists
def lowercase__( __UpperCamelCase: list[list[float]] ,__UpperCamelCase: list[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : list[list[float]] = []
for dlist, weight in zip(__UpperCamelCase ,__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = min(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Any = max(__UpperCamelCase )
SCREAMING_SNAKE_CASE : list[float] = []
        # for weight 0 the score is 1 - the normalized value (lower raw values score higher)
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
SCREAMING_SNAKE_CASE : List[Any] = f"Invalid weight of {weight:f} provided"
raise ValueError(__UpperCamelCase )
score_lists.append(__UpperCamelCase )
return score_lists
def lowercase__( __UpperCamelCase: list[list[float]] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Any = final_scores[j] + ele
return final_scores
def lowercase__( __UpperCamelCase: list[list[float]] ,__UpperCamelCase: list[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = get_data(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Dict = calculate_each_score(__UpperCamelCase ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : Any = generate_final_scores(__UpperCamelCase )
# append scores to source data
for i, ele in enumerate(__UpperCamelCase ):
source_data[i].append(__UpperCamelCase )
return source_data
| 246 | 1 |
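# The scoring snippet above min-max normalizes each column and flips the scale
# when a column's weight is 0 (lower raw values are better). The per-column
# rule in isolation, with unmasked names (demo values are illustrative):
def minmax_score(column: list, weight: int) -> list:
    lo, hi = min(column), max(column)
    span = hi - lo
    if span == 0:  # constant column: the snippet scores 1 (weight 0) or 0 (weight 1)
        return [float(1 - weight)] * len(column)
    if weight == 1:
        return [(v - lo) / span for v in column]
    if weight == 0:
        return [1 - (v - lo) / span for v in column]
    raise ValueError(f"Invalid weight of {weight:f} provided")

assert minmax_score([10.0, 20.0, 30.0], 1) == [0.0, 0.5, 1.0]
assert minmax_score([10.0, 20.0, 30.0], 0) == [1.0, 0.5, 0.0]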
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE :Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
SCREAMING_SNAKE_CASE :str = 25_0004
SCREAMING_SNAKE_CASE :Dict = 25_0020
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( A_ , unittest.TestCase ):
'''simple docstring'''
snake_case_ = MBartaaTokenizer
snake_case_ = MBartaaTokenizerFast
snake_case_ = True
snake_case_ = True
def UpperCamelCase_ ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
__A = MBartaaTokenizer(snake_case__ ,src_lang="en_XX" ,tgt_lang="ro_RO" ,keep_accents=snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : List[str] ):
__A = "<s>"
__A = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) ,snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) ,snake_case__ )
def UpperCamelCase_ ( self : str ):
__A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<s>" )
self.assertEqual(vocab_keys[1] ,"<pad>" )
self.assertEqual(vocab_keys[-1] ,"<mask>" )
self.assertEqual(len(snake_case__ ) ,10_54 )
def UpperCamelCase_ ( self : int ):
self.assertEqual(self.get_tokenizer().vocab_size ,10_54 )
def UpperCamelCase_ ( self : Any ):
__A = MBartaaTokenizer(snake_case__ ,src_lang="en_XX" ,tgt_lang="ro_RO" ,keep_accents=snake_case__ )
__A = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case__ ,["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,)
__A = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
snake_case__ ,[SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] ,)
__A = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
__A = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ ,[SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] ,)
@slow
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = {"input_ids": [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ ,model_name="facebook/mbart-large-50" ,revision="d3913889c59cd5c9e456b269c376325eabad57e2" ,)
def UpperCamelCase_ ( self : List[str] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__A = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__A = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
__A = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
__A = tempfile.mkdtemp()
__A = tokenizer_r.save_pretrained(snake_case__ )
__A = tokenizer_p.save_pretrained(snake_case__ )
                # Checks it saves with the same files, plus the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
__A = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(snake_case__ ,snake_case__ )
# Checks everything loads correctly in the same way
__A = tokenizer_r.from_pretrained(snake_case__ )
__A = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ ,snake_case__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=True
__A = tempfile.mkdtemp()
__A = tokenizer_r.save_pretrained(snake_case__ ,legacy_format=snake_case__ )
__A = tokenizer_p.save_pretrained(snake_case__ )
                # Checks it saves with the same files
self.assertSequenceEqual(snake_case__ ,snake_case__ )
# Checks everything loads correctly in the same way
__A = tokenizer_r.from_pretrained(snake_case__ )
__A = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ ,snake_case__ ) )
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=False
__A = tempfile.mkdtemp()
__A = tokenizer_r.save_pretrained(snake_case__ ,legacy_format=snake_case__ )
__A = tokenizer_p.save_pretrained(snake_case__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__A = tokenizer_r.from_pretrained(snake_case__ )
__A = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ ,snake_case__ ) )
shutil.rmtree(snake_case__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
snake_case_ = "facebook/mbart-large-50-one-to-many-mmt"
snake_case_ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
snake_case_ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
snake_case_ = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
@classmethod
def UpperCamelCase_ ( cls : Optional[int] ):
__A = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name ,src_lang="en_XX" ,tgt_lang="ro_RO" )
__A = 1
return cls
def UpperCamelCase_ ( self : int ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] ,25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] ,25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] ,25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] ,25_00_38 )
def UpperCamelCase_ ( self : int ):
__A = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,snake_case__ )
def UpperCamelCase_ ( self : List[str] ):
self.assertIn(snake_case__ ,self.tokenizer.all_special_ids )
__A = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
__A = self.tokenizer.decode(snake_case__ ,skip_special_tokens=snake_case__ )
__A = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ ,snake_case__ )
self.assertNotIn(self.tokenizer.eos_token ,snake_case__ )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] ,snake_case__ )
__A = 10
__A = self.tokenizer(snake_case__ ,max_length=snake_case__ ,truncation=snake_case__ ).input_ids[0]
self.assertEqual(ids[0] ,snake_case__ )
self.assertEqual(ids[-1] ,2 )
self.assertEqual(len(snake_case__ ) ,snake_case__ )
def UpperCamelCase_ ( self : str ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) ,[25_00_53, 25_00_01] )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = tempfile.mkdtemp()
__A = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(snake_case__ )
__A = MBartaaTokenizer.from_pretrained(snake_case__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,snake_case__ )
@require_torch
def UpperCamelCase_ ( self : Optional[int] ):
__A = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=snake_case__ ,return_tensors="pt" )
__A = shift_tokens_right(batch["labels"] ,self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=snake_case__ ,truncation=snake_case__ ,max_length=len(self.expected_src_tokens ) ,return_tensors="pt" ,)
__A = shift_tokens_right(batch["labels"] ,self.tokenizer.pad_token_id )
self.assertIsInstance(snake_case__ ,snake_case__ )
self.assertEqual((2, 14) ,batch.input_ids.shape )
self.assertEqual((2, 14) ,batch.attention_mask.shape )
__A = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,snake_case__ )
self.assertEqual(2 ,batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
def UpperCamelCase_ ( self : List[Any] ):
__A = self.tokenizer(self.src_text ,padding=snake_case__ ,truncation=snake_case__ ,max_length=3 ,return_tensors="pt" )
__A = self.tokenizer(
text_target=self.tgt_text ,padding=snake_case__ ,truncation=snake_case__ ,max_length=10 ,return_tensors="pt" )
__A = targets["input_ids"]
__A = shift_tokens_right(snake_case__ ,self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.tokenizer._build_translation_inputs(
"A test" ,return_tensors="pt" ,src_lang="en_XX" ,tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(snake_case__ ) ,{
# en_XX, A, test, EOS
"input_ids": [[25_00_04, 62, 30_34, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_00_01,
} ,)
| 15 |
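# The mBART tests above prepare decoder inputs with shift_tokens_right, which
# wraps each row's last non-pad token (the EOS) to position 0. A sketch of that
# wrap-around shift (a simplification: the library version also remaps -100):
import torch

def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    prev_output_tokens = input_ids.clone()
    # index of the last non-pad token in every row
    index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze(-1)
    prev_output_tokens[:, 1:] = input_ids[:, :-1].clone()
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens

labels = torch.tensor([[250020, 7, 8, 2, 1]])  # [ro_RO, tok, tok, eos, pad]
shifted = shift_tokens_right(labels, pad_token_id=1)
assert shifted[0, :2].tolist() == [2, 250020]  # eos first, then the language code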
from __future__ import annotations
__lowerCamelCase = list[list[int]]
# assigning initial values to the grid
__lowerCamelCase = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__lowerCamelCase = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def UpperCamelCase ( __lowerCamelCase : Matrix , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ):
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def UpperCamelCase ( __lowerCamelCase : Matrix ):
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def UpperCamelCase ( __lowerCamelCase : Matrix ):
if location := find_empty_location(__lowerCamelCase ):
snake_case , snake_case : Union[str, Any] = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
snake_case : List[Any] = digit
if sudoku(__lowerCamelCase ) is not None:
return grid
snake_case : Union[str, Any] = 0
return None
def UpperCamelCase ( __lowerCamelCase : Matrix ):
for row in grid:
for cell in row:
print(__lowerCamelCase , end=" " )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
__lowerCamelCase = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 59 | 0 |
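# The solver above locates the 3x3 subgrid of a cell with the arithmetic
# (row - row % 3, column - column % 3). A quick check that this really is the
# box's top-left corner for every cell (function name is illustrative):
def box_origin(row: int, column: int) -> tuple:
    return row - row % 3, column - column % 3

assert box_origin(4, 7) == (3, 6)
assert all(
    box_origin(r, c) == (3 * (r // 3), 3 * (c // 3)) for r in range(9) for c in range(9)
)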
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_( lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__lowercase : Union[str, Any] = XLMRobertaTokenizer
__lowercase : str = XLMRobertaTokenizerFast
__lowercase : Tuple = True
__lowercase : Dict = True
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ : List[str] = XLMRobertaTokenizer(__UpperCAmelCase ,keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : Optional[Any] = "<pad>"
lowerCAmelCase__ : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) ,__UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Any:
lowerCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<s>""" )
self.assertEqual(vocab_keys[1] ,"""<pad>""" )
self.assertEqual(vocab_keys[-1] ,"""<mask>""" )
self.assertEqual(len(__UpperCAmelCase ) ,1002 )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size ,1002 )
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : Tuple = XLMRobertaTokenizer(__UpperCAmelCase ,keep_accents=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__UpperCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
lowerCAmelCase__ : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__UpperCAmelCase ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
lowerCAmelCase__ : int = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] ,)
lowerCAmelCase__ : str = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
def UpperCAmelCase_ ( self ) -> List[str]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase__ : List[Any] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ : List[str] = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase ,**__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained(__UpperCAmelCase ,**__UpperCAmelCase )
lowerCAmelCase__ : List[str] = tempfile.mkdtemp()
lowerCAmelCase__ : Optional[Any] = tokenizer_r.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Any = tokenizer_p.save_pretrained(__UpperCAmelCase )
                # Checks it saves with the same files, plus the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
lowerCAmelCase__ : Dict = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__UpperCAmelCase ,__UpperCAmelCase )
# Checks everything loads correctly in the same way
lowerCAmelCase__ : Optional[int] = tokenizer_r.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = tokenizer_p.from_pretrained(__UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase ,__UpperCAmelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__UpperCAmelCase )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase__ : List[str] = tempfile.mkdtemp()
lowerCAmelCase__ : Dict = tokenizer_r.save_pretrained(__UpperCAmelCase ,legacy_format=__UpperCAmelCase )
lowerCAmelCase__ : Any = tokenizer_p.save_pretrained(__UpperCAmelCase )
                # Checks it saves with the same files
self.assertSequenceEqual(__UpperCAmelCase ,__UpperCAmelCase )
# Checks everything loads correctly in the same way
lowerCAmelCase__ : List[str] = tokenizer_r.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Dict = tokenizer_p.from_pretrained(__UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase ,__UpperCAmelCase ) )
shutil.rmtree(__UpperCAmelCase )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase__ : int = tempfile.mkdtemp()
lowerCAmelCase__ : Optional[int] = tokenizer_r.save_pretrained(__UpperCAmelCase ,legacy_format=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = tokenizer_p.save_pretrained(__UpperCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase__ : List[str] = tokenizer_r.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Any = tokenizer_p.from_pretrained(__UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase ,__UpperCAmelCase ) )
shutil.rmtree(__UpperCAmelCase )
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)  # SAMPLE_VOCAB is the fixture path defined at the top of this test file
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCAmelCase__ : str = {"input_ids": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase ,model_name="""xlm-roberta-base""" ,revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" ,)
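# Hedged round-trip sketch mirroring what the tests above assert (added for illustration,
# not part of the test file; requires network access to download "xlm-roberta-base"):
#
#     from transformers import XLMRobertaTokenizer, XLMRobertaTokenizerFast
#     slow = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#     fast = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")
#     text = "I was born in 92000, and this is falsé."
#     assert slow.tokenize(text) == fast.tokenize(text)
#     assert slow.encode(text) == fast.encode(text)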
| 354 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
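# --- usage sketch (added for illustration; not part of the original module) ---
# Composing a full InstructBlip config from default sub-configs. Offline-safe: only
# config objects are built, no weights are downloaded.
#
#     vision = InstructBlipVisionConfig()
#     qformer = InstructBlipQFormerConfig()
#     text = CONFIG_MAPPING["opt"]()
#     config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)
#     assert config.qformer_config.encoder_hidden_size == vision.hidden_size  # wired in __init__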
| 184 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id,
    )

    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name
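# Quick sanity checks for the renaming rules above (added for illustration; the sample
# timm key names are assumptions, not read from a real checkpoint):
#   rename_key("stem.conv.weight")               -> "bit.embedder.convolution.weight"
#   rename_key("head.fc.weight")                 -> "classifier.1.weight"
#   rename_key("stages.0.blocks.0.conv1.weight") -> "bit.encoder.stages.0.layers.0.conv1.weight"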
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default BiT configuration
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
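    # Example invocation (added for illustration; the script filename and output path
    # are placeholders, not from the original):
    #   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm --pytorch_dump_folder_path ./bit-50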
| 205 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bloom_fast'] = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bloom'] = [
        'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BloomForCausalLM',
        'BloomModel',
        'BloomPreTrainedModel',
        'BloomForSequenceClassification',
        'BloomForTokenClassification',
        'BloomForQuestionAnswering',
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
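# Standalone sketch (added for illustration, not part of transformers) of the
# lazy-import pattern above: a module-level __getattr__ (PEP 562) resolves exported
# names on first access, so heavy backends are imported only when actually used.
#
#     # my_pkg/__init__.py
#     _import_structure = {"json": ["dumps", "loads"]}
#
#     def __getattr__(name):
#         import importlib
#         for module_name, exported in _import_structure.items():
#             if name in exported:
#                 return getattr(importlib.import_module(module_name), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")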
| 205 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ['model.decoder.embed_positions.weights']
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads,
    )
    return config
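# Reference table for the branches above (values mirror the code):
#   small  -> hidden_size=1024, num_hidden_layers=24, num_attention_heads=16
#   medium -> hidden_size=1536, num_hidden_layers=48, num_attention_heads=24
#   large  -> hidden_size=2048, num_hidden_layers=48, num_attention_heads=32
# In every case ffn_dim = 4 * hidden_size.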
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 350 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(self, segmentation_model: CLIPSegForImageSegmentation, segmentation_processor: CLIPSegProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model, segmentation_processor=segmentation_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)
    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image], text: str, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        # Use the input text to generate the segmentation mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt").to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor)
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps)
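# Hedged usage sketch (added for illustration; the checkpoint names and the wiring are
# assumptions, not part of this file). The pipeline segments `image` with CLIPSeg using
# `text`, then inpaints the masked region with `prompt`:
#
#     from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-inpainting", custom_pipeline="text_inpainting",
#         segmentation_model=model, segmentation_processor=processor,
#     )
#     result = pipe(prompt="a red couch", image=init_image, text="the sofa").images[0]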
| 179 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5, position_embedding_type="absolute", block_per_row=4, approx_mode="full", initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
 | 262 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Gives the Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(train_data, train_target, classes, point, k=5):
    """Classifies `point` by majority vote among its `k` nearest training samples."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
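    # Hedged extension (not in the original): accuracy on the held-out split.
    # Output varies from run to run because train_test_split shuffles the data.
    correct = sum(
        classifier(X_train, y_train, classes, point) == classes[expected]
        for point, expected in zip(X_test, y_test)
    )
    print(f"accuracy: {correct / len(X_test):.2f}")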
| 253 | 0 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(self, *, bos="<s>", pad="<pad>", eos="</s>", unk="<unk>", extra_special_symbols=None):  # keyword-only arguments
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices
    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with the format `<symbol> <count>` per line."""
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def _load_meta(self, lines):
        return 0
    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
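# Worked example for rewrite_dict_keys (added for illustration; the toy vocabulary is hypothetical):
#   rewrite_dict_keys({"le@@": 5, "er": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3})
#   == {"le": 5, "er</w>": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}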
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 125 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    pass
| 125 | 1 |
from math import pow
def backtrack(needed_sum: int, power: int, current_number: int, current_sum: int, solutions_count: int) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            'Invalid input\n'
            'needed_sum must be between 1 and 1000, power between 2 and 10.')

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
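
# Quick self-checks (added for illustration; the values follow from the definition above):
assert solve(13, 2) == 1  # 2**2 + 3**2 is the only way to write 13 as distinct squares
assert solve(10, 2) == 1  # 1**2 + 3**2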
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words
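    # generate() above runs beam search (num_beams=2) with cached decoder states;
    # batch_decode then maps the resulting ids back to text, dropping special tokens,
    # so the result can be compared directly against the expected_text strings.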
    @slow
    def test_batch_generation_xsum(self):
        self._assert_generated_batch_equal_expected()
| 292 | 0 |
def ugly_numbers(n: int) -> int:
    """Returns the nth ugly number: a positive integer whose only prime factors are 2, 3 and 5."""
    ugly_nums = [1]
    i2 = i3 = i5 = 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'{ugly_numbers(200) = }')
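# Quick sanity check (values easy to verify by hand): the first ten ugly numbers are
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so ugly_numbers(10) == 12.
# assert ugly_numbers(10) == 12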
| 364 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__A = datasets.utils.logging.get_logger(__name__)
__A = ['''names''', '''prefix''']
__A = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
__A = ['''encoding_errors''', '''on_bad_lines''']
__A = ['''date_format''']
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""
a__ : str = ","
a__ : Optional[str] = None
a__ : Optional[Union[int, List[int], str]] = "infer"
a__ : Optional[List[str]] = None
a__ : Optional[List[str]] = None
a__ : Optional[Union[int, str, List[int], List[str]]] = None
a__ : Optional[Union[List[int], List[str]]] = None
a__ : Optional[str] = None
a__ : bool = True
a__ : Optional[Literal["c", "python", "pyarrow"]] = None
a__ : Dict[Union[int, str], Callable[[Any], Any]] = None
a__ : Optional[list] = None
a__ : Optional[list] = None
a__ : bool = False
a__ : Optional[Union[int, List[int]]] = None
a__ : Optional[int] = None
a__ : Optional[Union[str, List[str]]] = None
a__ : bool = True
a__ : bool = True
a__ : bool = False
a__ : bool = True
a__ : Optional[str] = None
a__ : str = "."
a__ : Optional[str] = None
a__ : str = '"'
a__ : int = 0
a__ : Optional[str] = None
a__ : Optional[str] = None
a__ : Optional[str] = None
a__ : Optional[str] = None
a__ : bool = True
a__ : bool = True
a__ : int = 0
a__ : bool = True
a__ : bool = False
a__ : Optional[str] = None
a__ : int = 1_0000
a__ : Optional[datasets.Features] = None
a__ : Optional[str] = "strict"
a__ : Literal["error", "warn", "skip"] = "error"
a__ : Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(__UpperCAmelCase )}: {e}""" )
raise
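# Minimal usage sketch (assumes a local "data.csv"; names here are illustrative):
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files="data.csv", sep=";")
# Any CsvConfig field above (sep, quotechar, skiprows, ...) can be passed through
# load_dataset and ends up in pd_read_csv_kwargs for pandas.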
| 277 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
UpperCAmelCase_ : str = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
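# e.g. ids_tensor((2, 5), vocab_size=100) yields a (2, 5) int32 array of ids in [0, 100),
# and random_attention_mask((2, 5)) yields a 0/1 mask whose last column is always 1.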
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
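# A note on the pattern used throughout the mixin above: each test generates once
# eagerly and once through jax.jit(model.generate), then asserts identical sequences.
# This both checks decoding correctness and guards that generate() stays traceable by XLA.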
| 91 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
UpperCAmelCase_ : str = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
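# _LazyModule defers the heavy imports registered in _import_structure until an
# attribute is first accessed, so importing this package stays cheap when torch
# or tokenizers are not actually used.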
| 91 | 1 |
def bfs(graph, source, sink, parents):
    # Return True if there is an augmenting path from source to sink.
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parents[ind] = u
    return visited[sink]
def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and used to store the augmenting path.
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
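# For the classic CLRS example network above, the maximum flow from node 0 to node 5
# is 23, so the script prints 23.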
| 260 |
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
if board[i][j] == 1:
print('Q' , end=' ' )
else:
print('.' , end=' ' )
print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
| 260 | 1 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
def __init__( self ):
"""simple docstring"""
        self.node_position = []
    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
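# Example session (hypothetical input): entering the 3 edges "0 1 1", "1 2 2", "0 2 3"
# builds a triangle; the minimum spanning tree printed is [(0, 1), (1, 2)], total weight 3.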
| 108 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np"
        )
        text_inputs = text_inputs["input_ids"]
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np"
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
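    # A note on the nightly tests above: each from_pretrained call pins onnxruntime to an
    # explicit execution provider (CUDA) and passes an ort.SessionOptions object, which is
    # the standard onnxruntime mechanism for controlling device placement and allocator behavior.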
| 162 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"]) | 270 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
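    # e.g. repo_id="org-name/dataset-name", path="filename.csv", revision=None resolves to
    # "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename.csv"
    # (the path component is percent-encoded by quote(), as asserted below).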
assert url == f"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(_SCREAMING_SNAKE_CASE )}""" | 270 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]
    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
    def __repr__(self):
        return str(self)
    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)
    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u, v):
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
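    # sherman_morrison implements the Sherman-Morrison formula: for invertible A and
    # column vectors u, v with 1 + v^T A^{-1} u != 0,
    #   (A + u v^T)^{-1} = A^{-1} - (A^{-1} u v^T A^{-1}) / (1 + v^T A^{-1} u).
    # Since `self` already holds A^{-1}, the update needs only matrix-vector products.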
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test2()
| 179 |
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    """Distinct ways to climb a staircase taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
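# Worked example: for number_of_steps = 5 the loop produces 2, 3, 5, 8 (the Fibonacci
# recurrence), so climb_stairs(5) == 8.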
| 179 | 1 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token(idx):
    token = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", 'stage2.cls_token') )
return token
def final():
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def __lowercase ( lowerCamelCase : str , lowerCamelCase : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] ):
UpperCamelCase_ : int = 'imagenet-1k-id2label.json'
UpperCamelCase_ : Dict = 1000
UpperCamelCase_ : Optional[Any] = 'huggingface/label-files'
UpperCamelCase_ : Tuple = num_labels
UpperCamelCase_ : Tuple = json.load(open(cached_download(hf_hub_url(lowerCamelCase , lowerCamelCase , repo_type='dataset' ) ) , 'r' ) )
UpperCamelCase_ : str = {int(lowerCamelCase ): v for k, v in idalabel.items()}
UpperCamelCase_ : Optional[int] = idalabel
UpperCamelCase_ : List[str] = {v: k for k, v in idalabel.items()}
UpperCamelCase_ : List[Any] = CvtConfig(num_labels=lowerCamelCase , idalabel=lowerCamelCase , labelaid=lowerCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
UpperCamelCase_ : Optional[Any] = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
UpperCamelCase_ : Optional[Any] = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
UpperCamelCase_ : Dict = [2, 2, 20]
UpperCamelCase_ : int = [3, 12, 16]
UpperCamelCase_ : List[str] = [192, 768, 1024]
UpperCamelCase_ : Optional[Any] = CvtForImageClassification(lowerCamelCase )
UpperCamelCase_ : Optional[Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
UpperCamelCase_ : List[str] = image_size
UpperCamelCase_ : Tuple = torch.load(lowerCamelCase , map_location=torch.device('cpu' ) )
UpperCamelCase_ : Optional[int] = OrderedDict()
UpperCamelCase_ : Any = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
UpperCamelCase_ : str = list_of_state_dict + cls_token(lowerCamelCase )
UpperCamelCase_ : Tuple = list_of_state_dict + embeddings(lowerCamelCase )
for cnt in range(config.depth[idx] ):
UpperCamelCase_ : Union[str, Any] = list_of_state_dict + attention(lowerCamelCase , lowerCamelCase )
UpperCamelCase_ : Optional[int] = list_of_state_dict + final()
for gg in list_of_state_dict:
print(lowerCamelCase )
for i in range(len(lowerCamelCase ) ):
UpperCamelCase_ : Any = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(lowerCamelCase )
model.save_pretrained(lowerCamelCase )
image_processor.save_pretrained(lowerCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Path to the original CvT checkpoint (.pth file) to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
a_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
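# Usage sketch (the script name and paths below are hypothetical):
#
#   python convert_cvt_checkpoint.py --cvt_model cvt-13 --image_size 384 \
#       --cvt_file_name CvT-13-384x384-IN-1k.pth --pytorch_dump_folder_path ./cvt-13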
| 50 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
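# A compact vectorized sketch of the same batch update rule (illustrative only, not used above):
def gradient_descent_step_sketch(x_matrix, y_vector, theta, learning_rate):
    # x_matrix: (m, n) numpy inputs with a leading bias column of ones; theta: (n,) parameters
    predictions = x_matrix @ theta
    gradient = x_matrix.T @ (predictions - y_vector) / len(y_vector)
    return theta - learning_rate * gradient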
| 50 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase_ (self ):
UpperCamelCase__ = 1
UpperCamelCase__ = 3
UpperCamelCase__ = (32, 32)
UpperCamelCase__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
return image
@property
def UpperCAmelCase_ (self ):
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCAmelCase_ (self ):
torch.manual_seed(0 )
UpperCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCAmelCase_ (self ):
torch.manual_seed(0 )
UpperCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(SCREAMING_SNAKE_CASE_ )
@property
def UpperCAmelCase_ (self ):
def extract(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
class __A:
"""simple docstring"""
def __init__(self ):
UpperCamelCase__ = torch.ones([0] )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
self.pixel_values.to(SCREAMING_SNAKE_CASE_ )
return self
return Out()
return extract
def UpperCAmelCase_ (self ):
UpperCamelCase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ = self.dummy_cond_unet
UpperCamelCase__ = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase__ = self.dummy_vae
UpperCamelCase__ = self.dummy_text_encoder
UpperCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# assemble the pipeline around the DDIM scheduler constructed above
UpperCamelCase__ = StableDiffusionPipeline(
unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = """A painting of a squirrel eating a burger"""
UpperCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
UpperCamelCase__ = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
UpperCamelCase__ = output.images
UpperCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
UpperCamelCase__ = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
UpperCamelCase__ = image[0, -3:, -3:, -1]
UpperCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ (self ):
UpperCamelCase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ = self.dummy_cond_unet
UpperCamelCase__ = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.dummy_vae
UpperCamelCase__ = self.dummy_text_encoder
UpperCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCamelCase__ = StableDiffusionPipeline(
unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = """A painting of a squirrel eating a burger"""
UpperCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
UpperCamelCase__ = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
UpperCamelCase__ = output.images
UpperCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
UpperCamelCase__ = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
UpperCamelCase__ = image[0, -3:, -3:, -1]
UpperCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ (self ):
UpperCamelCase__ = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=SCREAMING_SNAKE_CASE_ )
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert isinstance(pipe.scheduler , SCREAMING_SNAKE_CASE_ )
assert pipe.safety_checker is None
UpperCamelCase__ = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase__ = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.dummy_cond_unet
UpperCamelCase__ = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.dummy_vae
UpperCamelCase__ = self.dummy_text_encoder
UpperCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
UpperCamelCase__ = unet.half()
UpperCamelCase__ = vae.half()
UpperCamelCase__ = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ = StableDiffusionPipeline(
unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = """A painting of a squirrel eating a burger"""
UpperCamelCase__ = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ (self ):
UpperCamelCase__ = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
UpperCamelCase__ = 40_03_66_03_46
UpperCamelCase__ = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
UpperCamelCase__ = output.images
UpperCamelCase__ = image[0, -3:, -3:, -1]
UpperCamelCase__ = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with strong safety guidance (sld_guidance_scale = 2000)
UpperCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ = output.images
UpperCamelCase__ = image[0, -3:, -3:, -1]
UpperCamelCase__ = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ (self ):
UpperCamelCase__ = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = """padme amidala taking a bath artwork, safe for work, no nudity"""
UpperCamelCase__ = 27_34_97_17_55
UpperCamelCase__ = 7
UpperCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
UpperCamelCase__ = output.images
UpperCamelCase__ = image[0, -3:, -3:, -1]
UpperCamelCase__ = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
UpperCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ = output.images
UpperCamelCase__ = image[0, -3:, -3:, -1]
UpperCamelCase__ = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ (self ):
UpperCamelCase__ = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
UpperCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
UpperCamelCase__ = 10_44_35_52_34
UpperCamelCase__ = 12
UpperCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
UpperCamelCase__ = output.images
UpperCamelCase__ = image[0, -3:, -3:, -1]
UpperCamelCase__ = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
UpperCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ = output.images
UpperCamelCase__ = image[0, -3:, -3:, -1]
UpperCamelCase__ = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
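# The assertions above follow a common pixel-space regression pattern; a standalone sketch
# (assuming a numpy image batch of shape (1, H, W, 3), reusing the numpy import at the top):
def assert_corner_slice_close_sketch(image, expected_slice, atol=1e-2):
    image_slice = image[0, -3:, -3:, -1]  # 3x3 patch from the bottom-right corner, last channel
    assert np.abs(image_slice.flatten() - expected_slice).max() < atol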
| 244 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation (union by rank)
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # collect every undirected edge exactly once
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
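# Quick usage sketch (illustrative):
#
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 1); g.add_edge(2, 3, 2); g.add_edge(1, 3, 10)
#   mst = g.kruskal()       # keeps the two cheapest edges
#   print(mst.connections)  # {1: {2: 1}, 2: {1: 1, 3: 2}, 3: {2: 2}}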
| 244 | 1 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Optional[int] ) -> Optional[int]:
super().tearDown()
gc.collect()
def __A ( self : str ) -> Tuple:
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__lowerCamelCase = '''xvjiarui/stable-diffusion-2-inpainting'''
__lowerCamelCase , __lowerCamelCase = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase__ , safety_checker=UpperCamelCase__ )
__lowerCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__lowerCamelCase = jax.random.PRNGKey(0 )
__lowerCamelCase = 50
__lowerCamelCase = jax.device_count()
__lowerCamelCase = num_samples * [prompt]
__lowerCamelCase = num_samples * [init_image]
__lowerCamelCase = num_samples * [mask_image]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = pipeline.prepare_inputs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# shard inputs and rng
__lowerCamelCase = replicate(UpperCamelCase__ )
__lowerCamelCase = jax.random.split(UpperCamelCase__ , jax.device_count() )
__lowerCamelCase = shard(UpperCamelCase__ )
__lowerCamelCase = shard(UpperCamelCase__ )
__lowerCamelCase = shard(UpperCamelCase__ )
__lowerCamelCase = pipeline(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ )
__lowerCamelCase = output.images.reshape(UpperCamelCase__ , 5_12 , 5_12 , 3 )
__lowerCamelCase = images[0, 2_53:2_56, 2_53:2_56, -1]
__lowerCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowerCamelCase = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
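# The replicate/shard calls above set up the standard pmap data layout; a minimal sketch
# (assuming the leading axis of each array is divisible by jax.device_count()):
#
#   params = replicate(params)                        # copy the weight pytree to every device
#   rngs = jax.random.split(rng, jax.device_count())  # one PRNG key per device
#   batch = shard(batch)                              # (B, ...) -> (n_dev, B // n_dev, ...)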
| 363 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : List[str] ) -> Dict:
__lowerCamelCase = tempfile.mkdtemp()
# fmt: off
__lowerCamelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
__lowerCamelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__lowerCamelCase = {'''unk_token''': '''<unk>'''}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
__lowerCamelCase = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : int , **SCREAMING_SNAKE_CASE__ : int ) -> Any:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __A ( self : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __A ( self : Dict ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __A ( self : str ) -> Any:
__lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowerCamelCase = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self : List[Any] ) -> List[str]:
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] ) -> int:
__lowerCamelCase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowerCamelCase = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
__lowerCamelCase = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[Any] ) -> Union[str, Any]:
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' )
__lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self : List[Any] ) -> Optional[int]:
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = '''lower newer'''
__lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self : List[Any] ) -> Any:
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = '''lower newer'''
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
processor()
def __A ( self : Optional[Any] ) -> List[str]:
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , visual_prompt=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
processor()
def __A ( self : List[Any] ) -> Any:
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCamelCase = processor.batch_decode(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
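# A condensed sketch of the save/load round trip exercised above (hypothetical path):
#
#   processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   processor.save_pretrained("/tmp/clipseg-processor")
#   reloaded = CLIPSegProcessor.from_pretrained("/tmp/clipseg-processor")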
| 339 | 0 |
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 114 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features
lowerCamelCase__: Union[str, Any] =(
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =tmp_path / "cache"
lowerCamelCase__: Dict ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_parquet_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Dict:
"""simple docstring"""
if issubclass(__a , __a ):
lowerCamelCase__: str =parquet_path
elif issubclass(__a , __a ):
lowerCamelCase__: str =[parquet_path]
lowerCamelCase__: Optional[Any] =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(__a , __a )
for split in splits:
lowerCamelCase__: Optional[Any] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: str ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: List[str] =ParquetDatasetReader(
{"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: List[Any] =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: int =features.copy() if features else default_expected_features
lowerCamelCase__: Union[str, Any] =(
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: Union[str, Any] =ParquetDatasetReader({"train": parquet_path} , features=__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
if split:
lowerCamelCase__: Union[str, Any] ={split: parquet_path}
else:
lowerCamelCase__: int ="train"
lowerCamelCase__: Union[str, Any] ={"train": parquet_path, "test": parquet_path}
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Union[str, Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ ( __a , __a ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: Tuple =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: Tuple =pq.ParquetFile(tmp_path / "foo.parquet" )
lowerCamelCase__: Optional[int] =pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ ( __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" )
lowerCamelCase__: Union[str, Any] ={"image": [image_path]}
lowerCamelCase__: int =Features({"image": Image()} )
lowerCamelCase__: Tuple =Dataset.from_dict(__a , features=__a )
lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: Optional[Any] =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
lowerCamelCase__: List[str] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__a ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase_ ( __a , __a ) -> Any:
"""simple docstring"""
assert get_writer_batch_size(__a ) == expected
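# A minimal round-trip sketch of the writer/reader pair tested above (hypothetical path):
#
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   ParquetDatasetWriter(ds, "/tmp/foo.parquet").write()
#   reloaded = Dataset.from_parquet("/tmp/foo.parquet")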
| 10 | 0 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class __UpperCAmelCase :
def __init__( self , lowerCAmelCase_ = None , lowerCAmelCase_ = [] ):
"""simple docstring"""
_snake_case = 0
_snake_case = choices
_snake_case = prompt
if sys.platform == "win32":
_snake_case = '*'
else:
_snake_case = '➔ '
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = "" ):
"""simple docstring"""
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , lowerCAmelCase_ )
else:
forceWrite(self.choices[index] , lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
if index == self.position:
forceWrite(F' {self.arrow_char} ' )
self.write_choice(lowerCAmelCase_ )
else:
forceWrite(F' {self.choices[index]}' )
reset_cursor()
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = 1 ):
"""simple docstring"""
_snake_case = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(lowerCAmelCase_ )
move_cursor(lowerCAmelCase_ , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['up'] )
def lowerCamelCase ( self ):
"""simple docstring"""
self.move_direction(Direction.UP )
@input.mark(KEYMAP['down'] )
def lowerCamelCase ( self ):
"""simple docstring"""
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['newline'] )
def lowerCamelCase ( self ):
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , 'DOWN' )
return self.position
@input.mark(KEYMAP['interrupt'] )
def lowerCamelCase ( self ):
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , 'DOWN' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)] )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = int(chr(self.current_selection ) )
_snake_case = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , lowerCAmelCase_ )
else:
return
else:
return
def lowerCamelCase ( self , lowerCAmelCase_ = 0 ):
"""simple docstring"""
if self.prompt:
linebreak()
forceWrite(self.prompt , '\n' )
if in_colab:
forceWrite('Please input a choice index (starting from 0), and press enter' , '\n' )
else:
forceWrite('Please select a choice using the arrow or number keys, and selecting with enter' , '\n' )
_snake_case = default_choice
for i in range(len(self.choices ) ):
self.print_choice(lowerCAmelCase_ )
forceWrite('\n' )
move_cursor(len(self.choices ) - self.position , 'UP' )
with cursor.hide():
while True:
if in_colab:
try:
_snake_case = int(builtins.input() )
except ValueError:
_snake_case = default_choice
else:
_snake_case = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , 'UP' )
clear_line()
self.write_choice(lowerCAmelCase_ , '\n' )
return choice
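# Usage sketch (class and method names are illustrative stand-ins for the selector defined above):
#
#   menu = BulletMenu(prompt="Pick a backend:", choices=["cpu", "cuda", "mps"])
#   chosen_index = menu.run(default_choice=0)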
| 160 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowercase : Optional[Any] = logging.get_logger(__name__)
lowercase : List[str] = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = """marian"""
__lowercase = ["""past_key_values"""]
__lowercase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCAmelCase_=5_81_01 , lowerCAmelCase_=None , lowerCAmelCase_=10_24 , lowerCAmelCase_=12 , lowerCAmelCase_=40_96 , lowerCAmelCase_=16 , lowerCAmelCase_=12 , lowerCAmelCase_=40_96 , lowerCAmelCase_=16 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="gelu" , lowerCAmelCase_=10_24 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=5_81_00 , lowerCAmelCase_=False , lowerCAmelCase_=5_81_00 , lowerCAmelCase_=0 , lowerCAmelCase_=0 , lowerCAmelCase_=True , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = vocab_size
_snake_case = decoder_vocab_size or vocab_size
_snake_case = max_position_embeddings
_snake_case = d_model
_snake_case = encoder_ffn_dim
_snake_case = encoder_layers
_snake_case = encoder_attention_heads
_snake_case = decoder_ffn_dim
_snake_case = decoder_layers
_snake_case = decoder_attention_heads
_snake_case = dropout
_snake_case = attention_dropout
_snake_case = activation_dropout
_snake_case = activation_function
_snake_case = init_std
_snake_case = encoder_layerdrop
_snake_case = decoder_layerdrop
_snake_case = use_cache
_snake_case = encoder_layers
_snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
_snake_case = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , forced_eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
class __UpperCAmelCase ( _lowerCamelCase ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase ( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_snake_case = {0: 'batch'}
_snake_case = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_snake_case = {0: 'batch', 1: 'decoder_sequence'}
_snake_case = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_snake_case = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_snake_case , _snake_case = self.num_layers
for i in range(lowerCAmelCase_ ):
_snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
_snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_snake_case = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase ( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case = super().outputs
else:
_snake_case = super(lowerCAmelCase_ , self ).outputs
if self.use_past:
_snake_case , _snake_case = self.num_layers
for i in range(lowerCAmelCase_ ):
_snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
_snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ):
"""simple docstring"""
_snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Generate decoder inputs
_snake_case = seq_length if not self.use_past else 1
_snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_snake_case = dict(**lowerCAmelCase_ , **lowerCAmelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case , _snake_case = common_inputs['input_ids'].shape
_snake_case = common_inputs['decoder_input_ids'].shape[1]
_snake_case , _snake_case = self.num_attention_heads
_snake_case = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_snake_case = decoder_seq_length + 3
_snake_case = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_snake_case = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ )] , dim=1 )
_snake_case = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_snake_case , _snake_case = self.num_layers
_snake_case = min(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = max(lowerCAmelCase_ , lowerCAmelCase_ ) - min_num_layers
_snake_case = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowerCAmelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase_ ),
torch.zeros(lowerCAmelCase_ ),
torch.zeros(lowerCAmelCase_ ),
torch.zeros(lowerCAmelCase_ ),
) )
# TODO: test this.
_snake_case = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowerCAmelCase_ , lowerCAmelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) )
return common_inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ):
"""simple docstring"""
_snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case , _snake_case = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_snake_case = seqlen + 2
_snake_case , _snake_case = self.num_layers
_snake_case , _snake_case = self.num_attention_heads
_snake_case = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_snake_case = common_inputs['attention_mask'].dtype
_snake_case = torch.cat(
[common_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 )
_snake_case = [
(torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(lowerCAmelCase_ )
]
return common_inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ):
"""simple docstring"""
_snake_case = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_snake_case = tokenizer.num_special_tokens_to_add(lowerCAmelCase_ )
_snake_case = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase_ )
# Generate dummy inputs according to compute batch and sequence
_snake_case = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_snake_case = dict(tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ ) )
return common_inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
else:
_snake_case = self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
return common_inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case = super()._flatten_past_key_values_(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
_snake_case = super(lowerCAmelCase_ , self )._flatten_past_key_values_(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
@property
def lowerCamelCase ( self ):
"""simple docstring"""
return 1E-4
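# Consumption sketch (method names follow the public transformers OnnxConfig API, which the
# obfuscated names above stand in for; the config class name here is hypothetical):
#
#   onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)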
| 160 | 1 |
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")
    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
| 263 |
import warnings
warnings.warn(
'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 207 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_A = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""BeitFeatureExtractor"""]
_A = ["""BeitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 358 |
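The _LazyModule above defers the heavy torch/flax imports until an attribute is first accessed. A minimal sketch of the same idea using plain PEP 562 module-level __getattr__ (the math/sqrt mapping is illustrative):

import importlib

_import_structure = {"math": ["sqrt"]}  # submodule -> names it provides


def __getattr__(name):
    # Import the submodule only when one of its exported names is first requested.
    for module_name, exported_names in _import_structure.items():
        if name in exported_names:
            module = importlib.import_module(module_name)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")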
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class _lowerCamelCase :
def __init__( self : str , UpperCamelCase : int , UpperCamelCase : str=99 , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Dict=7 , UpperCamelCase : List[Any]=9 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Any=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=32 , UpperCamelCase : str=5 , UpperCamelCase : int=4 , UpperCamelCase : Optional[Any]=37 , UpperCamelCase : Tuple=8 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=0.002 , UpperCamelCase : List[Any]=1 , UpperCamelCase : Any=0 , UpperCamelCase : Optional[Any]=0 , UpperCamelCase : Dict=None , UpperCamelCase : str=None , ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = parent
lowerCAmelCase__ : Union[str, Any] = batch_size
lowerCAmelCase__ : List[str] = encoder_seq_length
lowerCAmelCase__ : Any = decoder_seq_length
# For common tests
lowerCAmelCase__ : Union[str, Any] = self.decoder_seq_length
lowerCAmelCase__ : List[Any] = is_training
lowerCAmelCase__ : Optional[Any] = use_attention_mask
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Any = vocab_size
lowerCAmelCase__ : Any = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : int = d_ff
lowerCAmelCase__ : int = relative_attention_num_buckets
lowerCAmelCase__ : Union[str, Any] = dropout_rate
lowerCAmelCase__ : str = initializer_factor
lowerCAmelCase__ : Tuple = eos_token_id
lowerCAmelCase__ : List[str] = pad_token_id
lowerCAmelCase__ : str = decoder_start_token_id
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : Dict = decoder_layers
def _lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
return TaConfig.from_pretrained("""google/umt5-base""" )
def _lowerCAmelCase ( self : str , UpperCamelCase : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : int=None , UpperCamelCase : List[Any]=None , ) -> List[Any]:
"""simple docstring"""
if attention_mask is None:
lowerCAmelCase__ : Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCAmelCase__ : List[str] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCAmelCase__ : str = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCamelCase )
if decoder_head_mask is None:
lowerCAmelCase__ : Optional[int] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase )
if cross_attn_head_mask is None:
lowerCAmelCase__ : Optional[int] = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCAmelCase__ : int = input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase__ : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase__ : Tuple = self.get_config()
lowerCAmelCase__ : Dict = config.num_attention_heads
lowerCAmelCase__ : Dict = self.prepare_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return config, input_dict
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Any , ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = UMTaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : Optional[int] = model(
input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase , attention_mask=UpperCamelCase , decoder_attention_mask=UpperCamelCase , )
lowerCAmelCase__ : Optional[int] = model(input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase )
lowerCAmelCase__ : List[Any] = result.last_hidden_state
lowerCAmelCase__ : Any = result.past_key_values
lowerCAmelCase__ : str = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Dict = UMTaModel(config=UpperCamelCase ).get_decoder().to(UpperCamelCase ).eval()
# first forward pass
lowerCAmelCase__ : Optional[int] = model(UpperCamelCase , use_cache=UpperCamelCase )
lowerCAmelCase__ : Any = model(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = model(UpperCamelCase , use_cache=UpperCamelCase )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
lowerCAmelCase__ : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
lowerCAmelCase__ : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : List[str] = model(UpperCamelCase )["""last_hidden_state"""]
lowerCAmelCase__ : Any = model(UpperCamelCase , past_key_values=UpperCamelCase )["""last_hidden_state"""]
# select random slice
lowerCAmelCase__ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : List[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCAmelCase__ : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 ) )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : int , ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = UMTaModel(config=UpperCamelCase ).to(UpperCamelCase ).half().eval()
lowerCAmelCase__ : Dict = model(**UpperCamelCase )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(UpperCamelCase ).any().item() )
@require_torch
class _lowerCamelCase ( a_ , a_ , a_ , unittest.TestCase ):
_lowerCamelCase :List[Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_lowerCamelCase :List[str] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_lowerCamelCase :Optional[Any] = (
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_lowerCamelCase :Dict = True
_lowerCamelCase :Optional[Any] = False
_lowerCamelCase :List[str] = False
_lowerCamelCase :Dict = True
_lowerCamelCase :str = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_lowerCamelCase :Optional[int] = [0.8, 0.9]
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Any = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : List[str] = UMTaModel(config_and_inputs[0] ).to(UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=UpperCamelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*UpperCamelCase )
def _lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : List[Any] = config_and_inputs[0]
lowerCAmelCase__ : int = UMTaForConditionalGeneration(UpperCamelCase ).eval()
model.to(UpperCamelCase )
lowerCAmelCase__ : List[Any] = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=UpperCamelCase ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase ),
}
for attn_name, (name, mask) in zip(UpperCamelCase , head_masking.items() ):
lowerCAmelCase__ : Union[str, Any] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowerCAmelCase__ : Tuple = torch.ones(
config.num_decoder_layers , config.num_heads , device=UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=UpperCamelCase , return_dict_in_generate=UpperCamelCase , **UpperCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowerCAmelCase__ : str = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Dict = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=UpperCamelCase ).to(UpperCamelCase )
lowerCAmelCase__ : Dict = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=UpperCamelCase , legacy=UpperCamelCase )
lowerCAmelCase__ : int = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
lowerCAmelCase__ : Union[str, Any] = tokenizer(UpperCamelCase , return_tensors="""pt""" , padding=UpperCamelCase ).input_ids
# fmt: off
lowerCAmelCase__ : List[Any] = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = model.generate(input_ids.to(UpperCamelCase ) )
lowerCAmelCase__ : int = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
lowerCAmelCase__ : Any = tokenizer.batch_decode(UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
| 212 | 0 |
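The cache test above asserts that decoding one token at a time with past_key_values reproduces a full-sequence forward pass. The same invariant on a toy decoder (a running sum stands in for hidden states so the check is self-contained):

import torch


def toy_decoder(input_ids, past=None):
    # "Hidden states" are running sums of token ids; `past` carries the sum so far,
    # playing the role of past_key_values.
    base = 0 if past is None else past
    hidden = torch.cumsum(input_ids, dim=-1) + base
    return hidden, hidden[..., -1]


ids = torch.tensor([[1, 2, 3, 4]])
full_hidden, _ = toy_decoder(ids)

past = None
stepped = []
for t in range(ids.shape[-1]):
    hidden, past = toy_decoder(ids[:, t : t + 1], past)
    stepped.append(hidden)

assert torch.equal(full_hidden, torch.cat(stepped, dim=-1))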
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 48 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict:
__magic_name__ : Union[str, Any] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Optional[int] = seq_length
__magic_name__ : List[str] = is_training
__magic_name__ : Optional[Any] = use_input_mask
__magic_name__ : Dict = use_token_type_ids
__magic_name__ : str = use_labels
__magic_name__ : int = vocab_size
__magic_name__ : List[Any] = hidden_size
__magic_name__ : Dict = num_hidden_layers
__magic_name__ : Dict = num_attention_heads
__magic_name__ : Tuple = intermediate_size
__magic_name__ : Any = hidden_act
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : List[Any] = max_position_embeddings
__magic_name__ : Any = type_vocab_size
__magic_name__ : Union[str, Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = initializer_range
__magic_name__ : str = num_labels
__magic_name__ : Tuple = num_choices
__magic_name__ : Any = relative_attention
__magic_name__ : str = position_biased_input
__magic_name__ : str = pos_att_type
__magic_name__ : Union[str, Any] = scope
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : List[Any] = None
if self.use_input_mask:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__magic_name__ : int = None
if self.use_token_type_ids:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : List[str] = None
__magic_name__ : Tuple = None
__magic_name__ : Union[str, Any] = None
if self.use_labels:
__magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.get_config()
__magic_name__ : Union[str, Any] = 300
return config
def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]:
__magic_name__ : Dict = DebertaModel(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0]
__magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0]
__magic_name__ : List[str] = model(_A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict:
__magic_name__ : List[str] = DebertaForMaskedLM(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]:
__magic_name__ : Optional[int] = self.num_labels
__magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A )
model.to(_A )
model.eval()
__magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_A )
def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]:
__magic_name__ : str = self.num_labels
__magic_name__ : int = DebertaForTokenClassification(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]:
__magic_name__ : int = DebertaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[int] = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
A_ : Tuple = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Union[str, Any] = True
A_ : Any = False
A_ : Dict = False
A_ : str = False
A_ : Dict = False
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : List[str] = DebertaModelTester(self )
__magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_A )
def __lowerCAmelCase ( self : Any ) -> str:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_A )
def __lowerCAmelCase ( self : str ) -> List[Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_A )
@slow
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : int = DebertaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='Model not available yet' )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
pass
@slow
def __lowerCAmelCase ( self : Dict ) -> Tuple:
__magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' )
__magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0]
# compare the actual values for a slice.
__magic_name__ : Tuple = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
| 331 | 0 |
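The integration test above pins a 3x3 logits slice and compares with an absolute tolerance; the comparison pattern in isolation (toy values, illustrative):

import torch

expected = torch.tensor([[-0.5986, -0.8055, -0.8462]])
assert torch.allclose(expected + 1e-5, expected, atol=1e-4)      # within tolerance
assert not torch.allclose(expected + 1e-3, expected, atol=1e-4)  # outside tolerance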
import numpy as np
import datasets
__lowerCamelCase = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
__lowerCamelCase = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
__lowerCamelCase = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
    def _compute(self, X, reference_distribution):
        '''simple docstring'''
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
| 362 |
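The docstring example above can be reproduced with numpy alone. This mirrors the row's computation (a scalar np.mean over the reference and a pseudo-inverse, which is needed here because the toy covariance is singular):

import numpy as np

reference = np.array([[0.0, 1.0], [1.0, 0.0]])
X = np.array([[0.0, 1.0]])

centered = X - np.mean(reference)              # scalar mean, as in the metric above
inv_cov = np.linalg.pinv(np.cov(reference.T))  # this covariance is singular, so use pinv
mahal = np.dot(np.dot(centered, inv_cov), centered.T).diagonal()
print(mahal)  # [0.5], matching the docstring example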
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class UpperCAmelCase ( A_ ):
def __init__(self : List[Any] , *snake_case__ : List[str] , **snake_case__ : Dict ) -> None:
'''simple docstring'''
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
| 10 | 0 |
'''simple docstring'''
from __future__ import annotations

from random import random


class Node:
    """
    Treap's node: a binary search tree by value and a heap by priority.
    """

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into two: values <= `value` and values > `value`."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps; every value in `left` must not exceed any value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert `value`: split around it, then merge left + new node + right."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Erase all nodes equal to `value`: split them out, merge the rest."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted order."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply commands like '+5' (insert 5) or '-5' (erase 5) to the treap."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """Interactive loop: read commands until 'q'."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good bye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 104 |
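A short usage sketch for the treap above (values are illustrative; inorder prints in sorted order regardless of the random priorities):

root = None
for value in (5, 3, 9, 1):
    root = insert(root, value)

inorder(root)  # 1,3,5,9,
print()
root = erase(root, 3)
inorder(root)  # 1,5,9,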
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 185 | 0 |
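A one-line check of the density above: at the mode (x = mu) the exponential term is 1, so the value is 1/sqrt(2*pi*sigma^2); with the defaults, gaussian(0) should be about 0.3989.

from numpy import pi, sqrt

print(1 / sqrt(2 * pi))  # 0.39894..., the expected value of gaussian(0)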
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def a_ ( __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
# Initialise PyTorch model
lowerCamelCase_ =AlbertConfig.from_json_file(__snake_case )
print(F'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase_ =AlbertForPreTraining(__snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_albert(__snake_case , __snake_case , __snake_case )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __snake_case )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a_ : List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 6 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def a_ ( __snake_case : str , __snake_case : bool = True , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : bool = False , __snake_case : float = 100 , __snake_case : float = 0.0_1 , __snake_case : float = 1 , ) -> Any:
"""simple docstring"""
lowerCamelCase_ =False
lowerCamelCase_ =search_prob
lowerCamelCase_ =start_temperate
lowerCamelCase_ =[]
lowerCamelCase_ =0
lowerCamelCase_ =None
while not search_end:
lowerCamelCase_ =current_state.score()
if best_state is None or current_score > best_state.score():
lowerCamelCase_ =current_state
scores.append(__snake_case )
iterations += 1
lowerCamelCase_ =None
lowerCamelCase_ =current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
lowerCamelCase_ =random.randint(0 , len(__snake_case ) - 1 ) # picking a random neighbor
lowerCamelCase_ =neighbors.pop(__snake_case )
lowerCamelCase_ =picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowerCamelCase_ =change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowerCamelCase_ =picked_neighbor
else:
lowerCamelCase_ =(math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowerCamelCase_ =picked_neighbor
lowerCamelCase_ =current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowerCamelCase_ =True
else:
lowerCamelCase_ =next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__snake_case ) , __snake_case )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def a_ ( __snake_case : List[str] , __snake_case : Optional[int] ) -> str:
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : List[str] = simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def a_ ( __snake_case : Dict , __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return (3 * x**2) - (6 * y)
a_ : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
a_ : Dict = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
| 6 | 1 |
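The acceptance rule above in isolation: a move that worsens the score is still taken with probability e^(change / T), so high temperatures accept almost anything and the search turns greedy as it cools (values illustrative):

import math

change = -2.0  # a candidate move that worsens the score by 2
for temp in (100.0, 10.0, 1.0):
    print(temp, round(math.e ** (change / temp), 4))
# 100.0 0.9802
# 10.0 0.8187
# 1.0 0.1353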
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase :
@staticmethod
def lowercase__ ( *__snake_case : int , **__snake_case : str ) -> Dict:
pass
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
A__ : str =(
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
_lowercase: Optional[Any] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowercase__ ( self : Dict , __snake_case : Optional[int] , __snake_case : int , __snake_case : Union[str, Any] ) -> List[Any]:
_lowerCAmelCase = pipeline(
"""document-question-answering""" , model=__snake_case , tokenizer=__snake_case , image_processor=__snake_case )
_lowerCAmelCase = INVOICE_URL
_lowerCAmelCase = list(zip(*apply_tesseract(load_image(__snake_case ) , __snake_case , """""" ) ) )
_lowerCAmelCase = """What is the placebo?"""
_lowerCAmelCase = [
{
"""image""": load_image(__snake_case ),
"""question""": question,
},
{
"""image""": image,
"""question""": question,
},
{
"""image""": image,
"""question""": question,
"""word_boxes""": word_boxes,
},
]
return dqa_pipeline, examples
def lowercase__ ( self : str , __snake_case : Dict , __snake_case : Any ) -> Dict:
_lowerCAmelCase = dqa_pipeline(__snake_case , top_k=2 )
self.assertEqual(
__snake_case , [
[
{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case ), """start""": ANY(__snake_case ), """end""": ANY(__snake_case )},
{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case ), """start""": ANY(__snake_case ), """end""": ANY(__snake_case )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def lowercase__ ( self : Optional[Any] ) -> Tuple:
_lowerCAmelCase = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""" )
_lowerCAmelCase = INVOICE_URL
_lowerCAmelCase = """How many cats are there?"""
_lowerCAmelCase = [
{"""score""": 0.00_01, """answer""": """oy 2312/2019""", """start""": 38, """end""": 39},
{"""score""": 0.00_01, """answer""": """oy 2312/2019 DUE""", """start""": 38, """end""": 40},
]
_lowerCAmelCase = dqa_pipeline(image=__snake_case , question=__snake_case , top_k=2 )
self.assertEqual(nested_simplify(__snake_case , decimals=4 ) , __snake_case )
_lowerCAmelCase = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(nested_simplify(__snake_case , decimals=4 ) , __snake_case )
        # No text is detected in this image, so layoutlmv2 should fail and
        # return an empty answer.
_lowerCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_lowerCAmelCase = dqa_pipeline(image=__snake_case , question=__snake_case , top_k=2 )
self.assertEqual(__snake_case , [] )
        # We can optionally pass the words and bounding boxes directly
_lowerCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = dqa_pipeline(image=__snake_case , question=__snake_case , words=__snake_case , boxes=__snake_case , top_k=2 )
self.assertEqual(__snake_case , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowercase__ ( self : Dict ) -> Union[str, Any]:
_lowerCAmelCase = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
_lowerCAmelCase = INVOICE_URL
_lowerCAmelCase = """What is the invoice number?"""
_lowerCAmelCase = dqa_pipeline(image=__snake_case , question=__snake_case , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{"""score""": 0.99_44, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.00_09, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
_lowerCAmelCase = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{"""score""": 0.99_44, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.00_09, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
_lowerCAmelCase = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
[
{"""score""": 0.99_44, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.00_09, """answer""": """us-001""", """start""": 16, """end""": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowercase__ ( self : List[str] ) -> List[Any]:
_lowerCAmelCase = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=50 , )
_lowerCAmelCase = INVOICE_URL
_lowerCAmelCase = """What is the invoice number?"""
_lowerCAmelCase = dqa_pipeline(image=__snake_case , question=__snake_case , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{"""score""": 0.99_74, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.99_48, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
_lowerCAmelCase = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{"""score""": 0.99_74, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.99_48, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
_lowerCAmelCase = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
[
{"""score""": 0.99_74, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.99_48, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
_lowerCAmelCase = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=__snake_case )
_lowerCAmelCase = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=__snake_case , revision="""3dc6de3""" , )
_lowerCAmelCase = INVOICE_URL
_lowerCAmelCase = """What is the invoice number?"""
_lowerCAmelCase = dqa_pipeline(image=__snake_case , question=__snake_case , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{"""score""": 0.42_51, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.08_19, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
_lowerCAmelCase = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{"""score""": 0.42_51, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.08_19, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
_lowerCAmelCase = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
[
{"""score""": 0.42_51, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.08_19, """answer""": """1110212019""", """start""": 23, """end""": 23},
]
]
* 2 , )
_lowerCAmelCase = list(zip(*apply_tesseract(load_image(__snake_case ) , __snake_case , """""" ) ) )
# This model should also work if `image` is set to None
_lowerCAmelCase = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{"""score""": 0.42_51, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.08_19, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
_lowerCAmelCase = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=__snake_case )
_lowerCAmelCase = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=__snake_case , revision="""3dc6de3""" , max_seq_len=50 , )
_lowerCAmelCase = INVOICE_URL
_lowerCAmelCase = """What is the invoice number?"""
_lowerCAmelCase = dqa_pipeline(image=__snake_case , question=__snake_case , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{"""score""": 0.99_99, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.99_98, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
_lowerCAmelCase = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
[
{"""score""": 0.99_99, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.99_98, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
_lowerCAmelCase = list(zip(*apply_tesseract(load_image(__snake_case ) , __snake_case , """""" ) ) )
# This model should also work if `image` is set to None
_lowerCAmelCase = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{"""score""": 0.99_99, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.99_98, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
@slow
@require_torch
def lowercase__ ( self : Dict ) -> Optional[int]:
_lowerCAmelCase = pipeline(
"""document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
_lowerCAmelCase = INVOICE_URL
_lowerCAmelCase = """What is the invoice number?"""
_lowerCAmelCase = dqa_pipeline(image=__snake_case , question=__snake_case , top_k=2 )
self.assertEqual(nested_simplify(__snake_case , decimals=4 ) , [{"""answer""": """us-001"""}] )
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
def lowercase__ ( self : str ) -> Optional[Any]:
pass
| 70 |
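A hedged usage sketch of the pipeline exercised above (requires transformers, pytesseract, and network access to fetch the checkpoint; the image path is illustrative):

from transformers import pipeline

dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
print(dqa(image="invoice.png", question="What is the invoice number?"))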
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + """ """ if standard_warn else """"""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
| 109 | 0 |
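How the helper above is typically called (a sketch assuming the deprecate function defined in that row; the scale/zoom names and version string are illustrative): it pops the deprecated kwarg, warns, and returns its value so the caller can still honor it.

def resize(image, zoom=1.0, **kwargs):
    scale = deprecate("scale", "99.0.0", "Use `zoom` instead.", take_from=kwargs)
    if scale is not None:
        zoom = scale  # keep honoring the old kwarg until it is removed
    return image, zoom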
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b) -> float:
    """Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5) -> str:
    """Classify `point` by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 358 |
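A cross-check of the hand-rolled classifier above against scikit-learn's implementation (same split, same k; assumes the names defined in the row):

from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
prediction = knn.predict([[4.4, 3.1, 1.3, 1.4]])[0]
print(classes[prediction])  # class name for the majority vote among the 5 nearest neighbors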
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
"""simple docstring"""
if "cls_token" in name:
A__ = name.replace('''cls_token''' , '''vit.embeddings.cls_token''' )
if "mask_token" in name:
A__ = name.replace('''mask_token''' , '''decoder.mask_token''' )
if "decoder_pos_embed" in name:
A__ = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
A__ = name.replace('''pos_embed''' , '''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
A__ = name.replace('''patch_embed.proj''' , '''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
A__ = name.replace('''patch_embed.norm''' , '''vit.embeddings.norm''' )
if "decoder_blocks" in name:
A__ = name.replace('''decoder_blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
A__ = name.replace('''blocks''' , '''vit.encoder.layer''' )
if "attn.proj" in name:
A__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
A__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
A__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
A__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
A__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
A__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
A__ = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
A__ = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
A__ = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
A__ = name.replace('''norm.weight''' , '''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
A__ = name.replace('''norm.bias''' , '''vit.layernorm.bias''' )
return name
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
A__ = key.split('''.''' )
A__ = int(key_split[1] )
if "decoder_blocks" in key:
A__ = config.decoder_hidden_size
A__ = '''decoder.decoder_layers.'''
if "weight" in key:
A__ = val[:dim, :]
A__ = val[dim : dim * 2, :]
A__ = val[-dim:, :]
elif "bias" in key:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
else:
A__ = config.hidden_size
A__ = '''vit.encoder.layer.'''
if "weight" in key:
A__ = val[:dim, :]
A__ = val[dim : dim * 2, :]
A__ = val[-dim:, :]
elif "bias" in key:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
else:
A__ = val
return orig_state_dict
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Tuple:
"""simple docstring"""
A__ = ViTMAEConfig()
if "large" in checkpoint_url:
A__ = 1_024
A__ = 4_096
A__ = 24
A__ = 16
elif "huge" in checkpoint_url:
A__ = 14
A__ = 1_280
A__ = 5_120
A__ = 32
A__ = 16
A__ = ViTMAEForPreTraining(lowercase_ )
A__ = torch.hub.load_state_dict_from_url(lowercase_ , map_location='''cpu''' )['''model''']
A__ = ViTMAEImageProcessor(size=config.image_size )
A__ = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
A__ = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
A__ = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
A__ = ViTMAEImageProcessor(size=config.image_size )
A__ = image_processor(images=lowercase_ , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
A__ = model(**lowercase_ )
A__ = outputs.logits
if "large" in checkpoint_url:
A__ = torch.tensor(
[[-0.73_09, -0.71_28, -1.01_69], [-1.01_61, -0.90_58, -1.18_78], [-1.04_78, -0.94_11, -1.19_11]] )
elif "huge" in checkpoint_url:
A__ = torch.tensor(
[[-1.15_99, -0.91_99, -1.22_21], [-1.19_52, -0.92_69, -1.23_07], [-1.21_43, -0.93_37, -1.22_62]] )
else:
A__ = torch.tensor(
[[-0.91_92, -0.84_81, -1.12_59], [-1.13_49, -1.00_34, -1.25_99], [-1.17_57, -1.04_29, -1.27_26]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase_ , atol=1E-4 )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 231 | 0 |
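The qkv branch above splits a fused projection into equal query/key/value blocks along dim 0; the slicing in isolation (toy size):

import torch

dim = 2
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)

q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]

assert torch.equal(torch.cat([q, k, v]), qkv_weight)  # the three blocks tile the original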