"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
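# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): `_LazyModule` above
# defers the actual submodule imports until an attribute is first accessed,
# which keeps `import transformers` cheap. The toy class below is a simplified
# assumption of how such a lazy module can work, demonstrated with the stdlib
# `math` module instead of real transformers submodules.
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the module that actually defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])  # import happens here
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


# `math` is only imported on this first attribute access:
lazy = ToyLazyModule("toy", {"math": ["sqrt"]})
assert lazy.sqrt(9.0) == 3.0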
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the word's letters, sorted, as its anagram signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every anagram of the given word."""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
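# Illustrative note (hypothetical data, independent of words.txt): sorting a
# word's letters gives a canonical key, so all anagrams share one bucket:
#
#   signature("listen") == signature("silent") == "eilnst"
#   word_by_signature["eilnst"] -> every listed word built from those letters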
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class XLMConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XLM model."""

    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }
    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Construct an XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
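# Illustrative usage sketch (not part of the original file): `attribute_map`
# lets the generic names used across the library resolve to XLM's own names.
#
#   config = XLMConfig(n_layers=6, n_heads=8)
#   config.num_hidden_layers  # -> 6, aliased to `n_layers` via attribute_map
#   config.hidden_size        # -> 2048, aliased to `emb_dim`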
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure."""
    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
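    # Worked example of the table above for a hypothetical layer index 0:
    #   "encoder.transformer_cells.0.attention_cell.proj_query.weight"
    #       maps to "bert.encoder.layer.0.attention.self.query.weight"
    # i.e. the `*` wildcard is the layer index substituted in the loop below.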
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
# Save space and energy 🎄
hf_bort_model.half()
    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '''\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
'''
_DESCRIPTION = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''
_KWARGS_DESCRIPTION = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'precision@10\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
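# Worked sketch (toy data) of the retrieval metric above: with identical
# English and Indic sentence vectors, row i's nearest neighbour in the cosine
# distance matrix is column i itself, so every query is matched within the
# top 10 and the score is 1.0:
#
#   vecs = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
#   precision_at_10(vecs, vecs)  # -> 1.0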
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
"""references""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(__UpperCamelCase , __UpperCamelCase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def counting_sort(collection: list) -> list:
    """Pure implementation of counting sort algorithm in Python."""
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string: str) -> str:
    """Counting-sort the characters of a string by their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
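# Complexity note (illustrative): counting_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5].
# The algorithm runs in O(n + k) time with O(k) extra space, where
# k = max(collection) - min(collection) + 1; the final reversed pass writes
# equal keys in their original relative order, which is what makes the sort
# stable and usable as a radix-sort building block.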
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size)
            )
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
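# Note on the magic numbers used throughout these tests (illustrative): SAM's
# image processor resizes the longest side to 1024, so an original image of
# size 1764 x 2646 becomes round(1764 * 1024 / 2646) x 1024 = 683 x 1024 — the
# `reshaped_input_size` — and post_process_masks upsamples the low-resolution
# masks back to the original 1764 x 2646.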
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_training(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class lowerCamelCase (unittest.TestCase , BackboneTesterMixin ):  # BackboneTesterMixin is assumed to be imported from the common backbone test utilities in the full module
"""simple docstring"""
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
def A_ ( self : Any ) -> int:
"""simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self )
def A_ ( self : int ) -> str:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels ):
                self.assertEqual(feature_map.shape[:2], (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ), len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels), (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True )
                self.assertIsNotNone(outputs.attentions )
| 663 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_small_integration_test( self : Tuple ) -> List[Any]:
        """simple docstring"""
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
        input_ids = tokenizer('Hello there' , return_tensors='pt' ).input_ids
        labels = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        # summed negative log-likelihood over the label tokens
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 703 | import heapq
def greedy_min_vertex_cover( graph: dict ) -> set[int]:
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
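# Illustrative aside (not part of the original algorithm): heapq only provides a
# min-heap, so the function above stores -1 * degree to surface the highest-degree
# vertex first. A minimal standalone sketch of that trick, with assumed toy values:
def _max_heap_demo() -> list[int]:
    heap: list[list] = []
    for priority in (2, 5, 1):
        heapq.heappush(heap, [-1 * priority, priority] )
    order = []
    while heap:
        order.append(heapq.heappop(heap )[1] )
    return order  # [5, 2, 1] -- max-first despite the underlying min-heap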
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 661 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class TableTransformerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''table-transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2_048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2_048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ) ->str:
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation , backbone , use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) ->int:
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) ->int:
        """simple docstring"""
        return self.d_model
class TableTransformerOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ) ->Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ] )
    @property
    def atol_for_validation( self ) ->float:
        """simple docstring"""
        return 1e-5
    @property
    def default_onnx_opset( self ) ->int:
        """simple docstring"""
        return 12
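# Usage sketch (hedged: assumes this mirrors transformers' TableTransformerConfig).
# The attribute_map defined above lets generic attribute names resolve to the
# DETR-style ones:
#   config = TableTransformerConfig()
#   config.hidden_size          # -> config.d_model (256)
#   config.num_attention_heads  # -> config.encoder_attention_heads (8)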
| 117 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    '''simple docstring'''
    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    '''simple docstring'''
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split( Enum ):
    '''simple docstring'''
    train = '''train'''
    dev = '''dev'''
    test = '''test'''
class TokenClassificationTask:
'''simple docstring'''
@staticmethod
    def read_examples_from_file( data_dir , mode ) ->List[InputExample]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
    def get_labels( path ) ->List[str]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
    def convert_examples_to_features( examples , label_list , max_seq_length , tokenizer , cls_token_at_end=False , cls_token="[CLS]" , cls_token_segment_id=1 , sep_token="[SEP]" , sep_token_extra=False , pad_on_left=False , pad_token=0 , pad_token_segment_id=0 , pad_token_label_id=-100 , sequence_a_segment_id=0 , mask_padding_with_zero=True , ) ->List[InputFeatures]:
        """simple docstring"""
        label_map = {label: i for i, label in enumerate(label_list )}
        features = []
        for ex_index, example in enumerate(examples ):
            if ex_index % 10_000 == 0:
                logger.info('''Writing example %d of %d''' , ex_index , len(examples ) )
            tokens = []
            label_ids = []
            for word, label in zip(example.words , example.labels ):
                word_tokens = tokenizer.tokenize(word )
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens ) > 0:
                    tokens.extend(word_tokens )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens ) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens )
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens )
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids )
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids )
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids ) == max_seq_length
            assert len(input_mask ) == max_seq_length
            assert len(segment_ids ) == max_seq_length
            assert len(label_ids ) == max_seq_length
            if ex_index < 5:
                logger.info('''*** Example ***''' )
                logger.info('''guid: %s''' , example.guid )
                logger.info('''tokens: %s''' , ''' '''.join([str(x ) for x in tokens] ) )
                logger.info('''input_ids: %s''' , ''' '''.join([str(x ) for x in input_ids] ) )
                logger.info('''input_mask: %s''' , ''' '''.join([str(x ) for x in input_mask] ) )
                logger.info('''segment_ids: %s''' , ''' '''.join([str(x ) for x in segment_ids] ) )
                logger.info('''label_ids: %s''' , ''' '''.join([str(x ) for x in label_ids] ) )
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids , attention_mask=input_mask , token_type_ids=segment_ids , label_ids=label_ids ) )
        return features
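    # Worked micro-example of the label alignment above (hypothetical tokens): a word
    # split into three sub-tokens keeps its real label id on the first piece and
    # pad_token_label_id (-100, ignored by the loss) on the remaining pieces:
    #   word="jacksonville", label_map[label]=3
    #   sub-tokens: ["jack", "##son", "##ville"] -> label_ids: [3, -100, -100]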
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset( Dataset ):
        '''simple docstring'''
        features: List[InputFeatures]
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        def __init__( self , token_classification_task , data_dir , tokenizer , labels , model_type , max_seq_length = None , overwrite_cache=False , mode = Split.train , ) ->List[str]:
            """simple docstring"""
            cached_features_file = os.path.join(
                data_dir , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(max_seq_length ) ) , )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '''.lock'''
            with FileLock(lock_path ):
                if os.path.exists(cached_features_file ) and not overwrite_cache:
                    logger.info(F"""Loading features from cached file {cached_features_file}""" )
                    self.features = torch.load(cached_features_file )
                else:
                    logger.info(F"""Creating features from dataset file at {data_dir}""" )
                    examples = token_classification_task.read_examples_from_file(data_dir , mode )
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                    logger.info(F"""Saving features into cached file {cached_features_file}""" )
                    torch.save(self.features , cached_features_file )
def __len__( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
return len(self.features )
        def __getitem__( self , i ) ->InputFeatures:
"""simple docstring"""
return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        '''simple docstring'''
        features: List[InputFeatures]
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = -1_00
        def __init__( self , token_classification_task , data_dir , tokenizer , labels , model_type , max_seq_length = None , overwrite_cache=False , mode = Split.train , ) ->Optional[int]:
            """simple docstring"""
            examples = token_classification_task.read_examples_from_file(data_dir , mode )
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen , ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32}, tf.int64) , (
                        {'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
                        tf.TensorShape([None] ),
                    ) , )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen , ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32, '''token_type_ids''': tf.int32}, tf.int64) , (
                        {
                            '''input_ids''': tf.TensorShape([None] ),
                            '''attention_mask''': tf.TensorShape([None] ),
                            '''token_type_ids''': tf.TensorShape([None] ),
                        },
                        tf.TensorShape([None] ),
                    ) , )
def __lowerCAmelCase ( self : str ) ->Optional[int]:
"""simple docstring"""
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : List[str] ) ->Dict:
"""simple docstring"""
return len(self.features )
        def __getitem__( self , i ) ->InputFeatures:
"""simple docstring"""
return self.features[i]
| 117 | 1 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type( model_name_or_path ):
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths( metric_fn , prediction , ground_truths ):
    """simple docstring"""
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
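# Toy illustration (assumed values) of the max-over-references reduction above:
#   metric_max_over_ground_truths(exact_match_score, "paris", ["Paris", "paris"])
# scores the prediction against every reference and keeps the best match,
# here 1.0 after the metric's usual normalization.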
def get_scores( args , preds_path , gold_data_path ):
    """simple docstring"""
    hypos = [line.strip() for line in open(preds_path , '''r''' ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep='''\t''' , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , '''r''' ).readlines()]
        answers = [[reference] for reference in references]
    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        f1 += metric_max_over_ground_truths(f1_score , prediction , ground_truths )
    em = 100.0 * em / total
    f1 = 100.0 * f1 / total
    logger.info(f'''F1: {f1:.2f}''' )
    logger.info(f'''EM: {em:.2f}''' )
def get_precision_at_k( args , preds_path , gold_data_path ):
    """simple docstring"""
    k = args.k
    hypos = [line.strip() for line in open(preds_path , '''r''' ).readlines()]
    references = [line.strip() for line in open(gold_data_path , '''r''' ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split('''\t''' )[:k] )
        ref_provenance = set(reference.split('''\t''' ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 100.0 * em / total
    logger.info(f'''Precision@{k}: {em: .2f}''' )
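# Worked example (toy data) for the precision@k above: with k=2, a hypothesis line
# "A\tB\tC" keeps its top-2 provenance {A, B}; against a reference line "B\tC"
# ({B, C}) the overlap is {B}, so the pair contributes 1/2 and the logged
# Precision@2 over that single pair would be 50.00.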
def evaluate_batch_retrieval( args , rag_model , questions ):
    """simple docstring"""
    def strip_title( title ):
        if title.startswith('''"''' ):
            title = title[1:]
        if title.endswith('''"''' ):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors='''pt''' , padding=True , truncation=True , )['''input_ids'''].to(args.device )
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs['''title''']]
        provenance_strings.append('''\t'''.join(provenance ) )
    return provenance_strings
def evaluate_batch_e2e( args , rag_model , questions ):
    """simple docstring"""
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors='''pt''' , padding=True , truncation=True )
        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate( # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )
        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info('''Q: {} - A: {}'''.format(q , a ) )
        return answers
def get_args():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=str , help=(
            '''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
            ''' model_name_or_path'''
        ) , )
    parser.add_argument(
        '''--index_name''' , default=None , choices=['''exact''', '''compressed''', '''legacy'''] , type=str , help='''RAG model retriever type''' , )
    parser.add_argument(
        '''--index_path''' , default=None , type=str , help='''Path to the retrieval index''' , )
    parser.add_argument('''--n_docs''' , default=5 , type=int , help='''Number of retrieved docs''' )
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=str , help=(
            '''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
            ''' precision@k.'''
        ) , )
    parser.add_argument('''--k''' , default=1 , type=int , help='''k for the precision@k calculation''' )
    parser.add_argument(
        '''--evaluation_set''' , default=None , type=str , required=True , help='''Path to a file containing evaluation samples''' , )
    parser.add_argument(
        '''--gold_data_path''' , default=None , type=str , required=True , help='''Path to a tab-separated file with gold samples''' , )
    parser.add_argument(
        '''--gold_data_mode''' , default='''qa''' , type=str , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file'''
            '''qa - a single line in the following format: question [tab] answer_list'''
            '''ans - a single line of the gold file contains the expected answer string'''
        ) , )
    parser.add_argument(
        '''--predictions_path''' , type=str , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
    parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
    parser.add_argument(
        '''--eval_batch_size''' , default=8 , type=int , help='''Batch size per GPU/CPU for evaluation.''' , )
    parser.add_argument(
        '''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
    parser.add_argument(
        '''--num_beams''' , default=4 , type=int , help='''Number of beams to be used when generating answers''' , )
    parser.add_argument('''--min_length''' , default=1 , type=int , help='''Min length of the generated answers''' )
    parser.add_argument('''--max_length''' , default=50 , type=int , help='''Max length of the generated answers''' )
    parser.add_argument(
        '''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
    parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
    args = parser.parse_args()
    args.device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    return args
def main( args ):
    """simple docstring"""
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith('''rag''' ):
        model_class = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info('''Evaluate the following checkpoints: %s''' , checkpoints )
    score_fn = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info('''***** Running evaluation for {} *****'''.format(checkpoint ) )
        logger.info(''' Batch size = %d''' , args.eval_batch_size )
        logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
        if args.model_type.startswith('''rag''' ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write('''\n'''.join(answers ) + '''\n''' )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write('''\n'''.join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
UpperCAmelCase : int = get_args()
main(args)
| 47 |
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator():
    """simple docstring"""
    a , b = 0, 1
    while True:
        a , b = b, a + b
        yield b
def solution( n = 1_000 ):
    """simple docstring"""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen ) ) ) < n:
        answer += 1
    return answer + 1
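    # e.g. solution(3) == 12: counting from F(1) = 1, the first Fibonacci number
    # with three digits is F(12) = 144.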
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 47 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_output_embeds_base_model( self ):
        """simple docstring"""
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.int32 , ) # J'aime le camembert !"
        output = model(input_ids )["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 340 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_dataloaders( accelerator , batch_size = 16 , model_name = "bert-base-cased" ) -> Union[str, Any]:
    """simple docstring"""
    __UpperCamelCase = None  # placeholder removed; real names restored below
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset('glue' , 'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=1_28 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
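# Usage sketch (illustrative; `accelerator` comes from training_function below):
#   train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased")
#   next(iter(train_dl))  # dict of input_ids / attention_mask / labels tensors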
def training_function( config , args ) -> int:
    """simple docstring"""
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('glue' , 'mrpc' )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''' , eval_metric )
        performance_metric[f'''epoch-{epoch}'''] = eval_metric['accuracy']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
            json.dump(performance_metric , f )
def main() -> List[str]:
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--performance_lower_bound' , type=float , default=None , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=3 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 1 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest( unittest.TestCase ):
    '''simple docstring'''
    def setUp(self ) -> Tuple:
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
        processor = Blip2Processor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer(self , **kwargs ) -> Optional[int]:
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor(self , **kwargs ) -> Union[str, Any]:
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown(self ) -> List[str]:
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self ) -> Any:
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self ) -> List[Any]:
        '''simple docstring'''
        processor = Blip2Processor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = Blip2Processor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
    def test_image_processor(self ) -> str:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer(self ) -> Union[str, Any]:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor(self ) -> Dict:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode(self ) -> Dict:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names(self ) -> str:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 719 | import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
__magic_name__ =logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder( CLIPPreTrainedModel ):
    def __init__(self , config , proj_size=768 ) -> Dict:
        '''simple docstring'''
        super().__init__(config )
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config )
        self.mapper = PaintByExampleMapper(config )
        self.final_layer_norm = nn.LayerNorm(config.hidden_size )
        self.proj_out = nn.Linear(config.hidden_size , self.proj_size )
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
    def forward(self , pixel_values , return_uncond_vector=False ) -> List[str]:
        '''simple docstring'''
        clip_output = self.model(pixel_values=pixel_values )
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None] )
        latent_states = self.final_layer_norm(latent_states )
        latent_states = self.proj_out(latent_states )
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper( nn.Module ):
    def __init__(self , config ) -> int:
        '''simple docstring'''
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size , num_heads , hid_size // num_heads , activation_fn='''gelu''' , attention_bias=True )
                for _ in range(num_layers )
            ] )
    def forward(self , hidden_states ) -> str:
        '''simple docstring'''
        for block in self.blocks:
            hidden_states = block(hidden_states )
        return hidden_states
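# Shape flow through PaintByExampleImageEncoder.forward, for reference
# (B = batch size; hidden = config.hidden_size; values are illustrative):
#   pixel_values (B, 3, H, W) -> CLIP pooler_output (B, hidden)
#   -> [:, None] (B, 1, hidden) -> mapper / layer norm (B, 1, hidden)
#   -> proj_out (B, 1, proj_size)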
| 469 | 0 |
import math
import sys
def UpperCAmelCase ( number ):
    """simple docstring"""
    if number != int(number ):
        raise ValueError('''the value of input must be a natural number''' )
    if number < 0:
        raise ValueError('''the value of input must not be a negative number''' )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )  # largest j with j*j <= i
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer )
        answers[i] = answer
    return answers[number]
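    # Worked check: answers[12] == 3 because 12 = 4 + 4 + 4, and answers[13] == 2
    # because 13 = 9 + 4.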
if __name__ == "__main__":
import doctest
doctest.testmod() | 534 | import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a : Union[str, Any] = logging.get_logger(__name__)
def load_checkpoint( checkpoint_path ):
    """simple docstring"""
    sd = torch.load(checkpoint_path , map_location='''cpu''' )
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    # pop unnecessary weights
    keys_to_delete = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
        '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
        '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
            k_name = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
            v_name = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight separated as K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k , v , q = torch.split(value , depth // 3 , dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
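# Illustrative key mapping performed above (hypothetical layer index, d_model=768):
#   'decoder.layers.0.self_attn.qkv_proj.weight'  shape (2304, 768)
#     -> '...q_proj.weight', '...k_proj.weight', '...v_proj.weight', each (768, 768)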
@torch.no_grad()
def convert_opt_checkpoint( checkpoint_path , pytorch_dump_folder_path , config=None ):
    """simple docstring"""
    state_dict = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()
    model = OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )
    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 534 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 5_1_2,
't5-base': 5_1_2,
't5-large': 5_1_2,
't5-3b': 5_1_2,
't5-11b': 5_1_2,
}
SPIECE_UNDERLINE = '▁'
class TaTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , sp_model_kwargs = None , legacy=True , **kwargs , ):
        '''simple docstring'''
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F'''<extra_id_{i}>''' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x : bool("extra_id" in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens" )
        if legacy:
            logger.warning_once(
                F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy=legacy , **kwargs , )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        '''simple docstring'''
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    F''' {pretrained_model_name_or_path} automatically truncating your input to'''
                    F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
                    F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value." , FutureWarning , )
        return max_model_length
@property
    def vocab_size( self ):
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        # normal case: some special tokens
        if token_ids_b is None:
            return ([0] * len(token_ids_a )) + [1]
        return ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
    def get_sentinel_tokens( self ):
        '''simple docstring'''
        return list(
            set(filter(lambda x : bool(re.search(R"<extra_id_\d+>" , x ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ):
        '''simple docstring'''
        return [self._convert_token_to_id(token ) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present( self , token_ids ):
        '''simple docstring'''
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        token_ids_a = self._add_eos_if_not_present(token_ids_a )
        if token_ids_b is None:
            return token_ids_a
        else:
            token_ids_b = self._add_eos_if_not_present(token_ids_b )
            return token_ids_a + token_ids_b
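    # Resulting formats (standard T5 convention): a single sequence "X" becomes
    # "X </s>"; a pair becomes "A </s> B </s>". Token type ids are all zeros.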
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def tokenize( self , text , **kwargs ):
        '''simple docstring'''
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE , " " )
        return super().tokenize(text , **kwargs )
    def _tokenize( self , text , **kwargs ):
        '''simple docstring'''
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE )
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text , out_type=str )
        if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(SPIECE_UNDERLINE ):
            tokens = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
        return tokens
def snake_case__ ( self , snake_case ):
'''simple docstring'''
if token.startswith("<extra_id_" ):
UpperCamelCase__ = re.match(R"<extra_id_(\d+)>" , snake_case )
UpperCamelCase__ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(snake_case )
def snake_case__ ( self , snake_case ):
'''simple docstring'''
if index < self.sp_model.get_piece_size():
UpperCamelCase__ = self.sp_model.IdToPiece(snake_case )
else:
UpperCamelCase__ = F'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def snake_case__ ( self , snake_case ):
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = ""
UpperCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case ) + token
UpperCamelCase__ = True
UpperCamelCase__ = []
else:
current_sub_tokens.append(snake_case )
UpperCamelCase__ = False
out_string += self.sp_model.decode(snake_case )
return out_string.strip()
def snake_case__ ( self , snake_case , snake_case = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase__ = os.path.join(
snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , "wb" ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
| 185 |
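The tokenizer methods above map sentinel tokens `<extra_id_N>` to ids counted down from the top of the vocabulary. A minimal self-contained sketch of that round trip; `VOCAB_SIZE` here is a hypothetical stand-in for `sp_model.get_piece_size() + extra_ids`:

import re

VOCAB_SIZE = 32100  # assumed: 32000 SentencePiece pieces + 100 extra ids

def sentinel_token_to_id(token: str) -> int:
    # <extra_id_N> is mapped to vocab_size - N - 1, mirroring the method above
    match = re.match(r"<extra_id_(\d+)>", token)
    return VOCAB_SIZE - int(match.group(1)) - 1

def sentinel_id_to_token(index: int) -> str:
    return f"<extra_id_{VOCAB_SIZE - 1 - index}>"

assert sentinel_token_to_id("<extra_id_0>") == 32099
assert sentinel_id_to_token(32099) == "<extra_id_0>"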
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__UpperCamelCase = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available() -> bool:
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_mpi_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowerCamelCase__ ( UpperCAmelCase ):
"""simple docstring"""
_UpperCamelCase : str = field(
default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
def snake_case__ ( self ):
'''simple docstring'''
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , snake_case , )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
UpperCamelCase__ = torch.device("cpu" )
UpperCamelCase__ = 0
elif is_sagemaker_model_parallel_available():
UpperCamelCase__ = smp.local_rank()
UpperCamelCase__ = torch.device("cuda" , snake_case )
UpperCamelCase__ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
UpperCamelCase__ = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
UpperCamelCase__ = torch.device("cuda" , self.local_rank )
UpperCamelCase__ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
UpperCamelCase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
UpperCamelCase__ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
UpperCamelCase__ = torch.device("cuda" , self.local_rank )
UpperCamelCase__ = 1
if device.type == "cuda":
torch.cuda.set_device(snake_case )
return device
@property
def snake_case__ ( self ):
'''simple docstring'''
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def snake_case__ ( self ):
'''simple docstring'''
return not is_sagemaker_model_parallel_available()
@property
def snake_case__ ( self ):
'''simple docstring'''
return False
| 185 | 1 |
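A quick illustration of the environment-variable probing performed by the availability check above; the values set here are made up for demonstration:

import importlib.util
import json
import os

os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})

smp_options = json.loads(os.getenv("SM_HP_MP_PARAMETERS", "{}"))
mpi_options = json.loads(os.getenv("SM_FRAMEWORK_PARAMS", "{}"))
print("partitions" in smp_options)                             # True
print(mpi_options.get("sagemaker_mpi_enabled", False))         # True
print(importlib.util.find_spec("smdistributed") is not None)   # False unless installed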
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Optional[Any] ):
"""simple docstring"""
snake_case : Optional[Any] = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(k, None)
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: str ):
"""simple docstring"""
snake_case : Union[str, Any] = emb.weight.shape
snake_case : int = nn.Linear(_a , _a , bias=_a )
snake_case : str = emb.weight.data
return lin_layer
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Optional[Any] , lowerCamelCase_: str="facebook/mbart-large-en-ro" , lowerCamelCase_: Union[str, Any]=False , lowerCamelCase_: str=False ):
"""simple docstring"""
snake_case : List[str] = torch.load(_a , map_location="cpu" )["model"]
remove_ignore_keys_(_a )
snake_case : List[Any] = state_dict["encoder.embed_tokens.weight"].shape[0]
snake_case : Optional[int] = MBartConfig.from_pretrained(_a , vocab_size=_a )
if mbart_aa and finetuned:
snake_case : int = "relu"
snake_case : Optional[Any] = state_dict["decoder.embed_tokens.weight"]
snake_case : Optional[Any] = MBartForConditionalGeneration(_a )
model.model.load_state_dict(_a )
if finetuned:
snake_case : str = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
parser.add_argument('--mbart_50', action='store_true', help='whether the model is mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
A = parser.parse_args()
A = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 449 |
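The `make_linear_from_emb` helper in the conversion script above ties the output projection to the embedding table. A toy sketch of the same trick, with arbitrary sizes:

import torch
from torch import nn

emb = nn.Embedding(10, 4)           # toy vocabulary of 10 tokens, hidden size 4
lin = nn.Linear(4, 10, bias=False)  # projects hidden states back to vocab logits
lin.weight.data = emb.weight.data   # share the (10, 4) weight matrix

hidden = emb(torch.tensor([3]))     # embed token 3
print(lin(hidden).shape)            # torch.Size([1, 10]): one logit per token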
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCamelCase ( __A ):
'''simple docstring'''
def __init__( self : Dict , a : Tuple , a : Any=13 , a : Any=7 , a : Union[str, Any]=True , a : List[Any]=True , a : List[str]=False , a : List[str]=True , a : Any=99 , a : str=32 , a : Any=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Dict="gelu" , a : List[Any]=0.1 , a : Optional[Any]=0.1 , a : List[str]=512 , a : Union[str, Any]=16 , a : str=2 , a : Dict=0.02 , a : Optional[int]=3 , a : Union[str, Any]=4 , a : int=None , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = seq_length
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : int = use_input_mask
SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : Tuple = num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : Optional[int] , a : Optional[int] , a : Dict , a : str , a : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = DistilBertModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , a )
SCREAMING_SNAKE_CASE : Optional[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Tuple , a : Optional[int] , a : Dict , a : Tuple , a : int , a : int , a : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : str = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : List[Any] , a : int , a : Optional[Any] , a : Optional[Any] , a : str , a : str , a : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Optional[int] , a : str , a : Any , a : int , a : Optional[Any] , a : int , a : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Optional[Any] , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : Dict , a : Any , a : Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : int , a : Any , a : Optional[int] , a : Union[str, Any] , a : Tuple , a : Optional[int] , a : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.num_choices
SCREAMING_SNAKE_CASE : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ =(
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self )
SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=a , dim=37 )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Any = model_class(config=a )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(a , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt" ) )
SCREAMING_SNAKE_CASE : Tuple = torch.jit.load(os.path.join(a , "traced_model.pt" ) , map_location=a )
loaded(inputs_dict["input_ids"].to(a ) , inputs_dict["attention_mask"].to(a ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertModel.from_pretrained("distilbert-base-uncased" )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , attention_mask=a )[0]
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) ) | 25 | 0 |
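The GPU test above exercises a trace / save / reload cycle. A minimal standalone sketch of that TorchScript round trip:

import os
import tempfile

import torch
from torch import nn

model = nn.Linear(4, 2).eval()
example = torch.randn(1, 4)
traced = torch.jit.trace(model, example)
with tempfile.TemporaryDirectory() as tmp:
    torch.jit.save(traced, os.path.join(tmp, "traced_model.pt"))
    loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location="cpu")
print(torch.allclose(traced(example), loaded(example)))  # True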
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __A ( unittest.TestCase ):
def _snake_case ( self ):
lowerCamelCase =inspect.getfile(accelerate.test_utils )
lowerCamelCase =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
lowerCamelCase =os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def _snake_case ( self ):
lowerCamelCase =f"""\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n """.split()
lowerCamelCase =[sys.executable] + distributed_args
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
| 716 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ : List[Any] ={'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict =['''YolosFeatureExtractor''']
UpperCAmelCase__ : List[Any] =['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] =[
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Optional[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 269 | 0 |
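The import structure above defers heavy imports until first attribute access. A generic sketch of the idea using importlib (not the transformers `_LazyModule` implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the module that actually defines it
        self._class_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # import lazily, only when the attribute is first requested
        module = importlib.import_module(self._class_to_module[attr])
        return getattr(module, attr)

lazy = LazyModule("lazy_demo", {"json": ["dumps", "loads"]})
print(lazy.dumps({"ok": True}))  # json is imported here, on first access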
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
lowercase__ = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
lowercase__ = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
lowercase__ = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
def A_ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'] , )
def A_ ( self , lowercase , lowercase , lowercase=None , lowercase=1 , lowercase="binary" , lowercase=None , lowercase="warn" , ):
_lowerCamelCase : List[str] = recall_score(
lowercase , lowercase , labels=lowercase , pos_label=lowercase , average=lowercase , sample_weight=lowercase , zero_division=lowercase , )
return {"recall": float(lowercase ) if score.size == 1 else score} | 630 |
"""simple docstring"""
import qiskit
def quantum_entanglement(qubits: int = 2):
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, qubits)
    # Adding a H gate on qubit 0 (now q0 is in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate between qubit i-1 and qubit i
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(qubits)))
    # Now measuring any one qubit would collapse the other qubits into
    # the same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"Total count for various states are: {quantum_entanglement(3)}") | 630 | 1 |
"""simple docstring"""
def sum_of_proper_divisors(input_num: int) -> int:
    # Sum of the proper divisors of ``input_num`` (every divisor smaller than the number itself).
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 244 |
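Usage sketch for the divisor-sum helper above: a number is perfect exactly when it equals the sum of its proper divisors.

for n in (6, 28, 496):
    print(n, sum_of_proper_divisors(n) == n)  # True for all three perfect numbers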
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class lowercase__ :
def __init__( self : List[str] , snake_case__ : str , snake_case__ : Any=14 , snake_case__ : str=7 , snake_case__ : Any=True , snake_case__ : Dict=True , snake_case__ : Optional[int]=False , snake_case__ : Union[str, Any]=True , snake_case__ : List[str]=99 , snake_case__ : List[Any]=32 , snake_case__ : List[Any]=4 , snake_case__ : Dict=4 , snake_case__ : int=4 , snake_case__ : int=37 , snake_case__ : Tuple="gelu" , snake_case__ : int=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : int=512 , snake_case__ : int=0.02 , ):
lowerCamelCase_ : List[Any] =parent
lowerCamelCase_ : List[Any] =batch_size
lowerCamelCase_ : str =seq_length
lowerCamelCase_ : int =is_training
lowerCamelCase_ : List[Any] =use_input_mask
lowerCamelCase_ : Any =use_token_type_ids
lowerCamelCase_ : Dict =use_labels
lowerCamelCase_ : List[Any] =vocab_size
lowerCamelCase_ : List[Any] =hidden_size
lowerCamelCase_ : List[Any] =rotary_dim
lowerCamelCase_ : Optional[Any] =num_hidden_layers
lowerCamelCase_ : Union[str, Any] =num_attention_heads
lowerCamelCase_ : Tuple =intermediate_size
lowerCamelCase_ : List[str] =hidden_act
lowerCamelCase_ : int =hidden_dropout_prob
lowerCamelCase_ : str =attention_probs_dropout_prob
lowerCamelCase_ : Tuple =max_position_embeddings
lowerCamelCase_ : Dict =initializer_range
lowerCamelCase_ : Any =None
lowerCamelCase_ : List[str] =vocab_size - 1
lowerCamelCase_ : List[str] =vocab_size - 1
lowerCamelCase_ : List[Any] =vocab_size - 1
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : Dict =None
if self.use_input_mask:
lowerCamelCase_ : Dict =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ : List[Any] =GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=snake_case__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ : Optional[int] =self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Tuple =config_and_inputs
lowerCamelCase_ : List[Any] ={"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self : Tuple , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : int ):
lowerCamelCase_ : Dict =20
lowerCamelCase_ : Union[str, Any] =model_class_name(snake_case__ )
lowerCamelCase_ : List[str] =model.init_cache(input_ids.shape[0] , snake_case__ )
lowerCamelCase_ : Dict =jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" )
lowerCamelCase_ : Optional[Any] =jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCamelCase_ : List[str] =model(
input_ids[:, :-1] , attention_mask=snake_case__ , past_key_values=snake_case__ , position_ids=snake_case__ , )
lowerCamelCase_ : Optional[Any] =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
lowerCamelCase_ : Tuple =model(
input_ids[:, -1:] , attention_mask=snake_case__ , past_key_values=outputs_cache.past_key_values , position_ids=snake_case__ , )
lowerCamelCase_ : Dict =model(snake_case__ )
lowerCamelCase_ : int =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : str , snake_case__ : Dict ):
lowerCamelCase_ : int =20
lowerCamelCase_ : List[str] =model_class_name(snake_case__ )
lowerCamelCase_ : Tuple =jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
lowerCamelCase_ : Optional[int] =model.init_cache(input_ids.shape[0] , snake_case__ )
lowerCamelCase_ : Tuple =jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCamelCase_ : Optional[Any] =model(
input_ids[:, :-1] , attention_mask=snake_case__ , past_key_values=snake_case__ , position_ids=snake_case__ , )
lowerCamelCase_ : Any =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
lowerCamelCase_ : int =model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=snake_case__ , position_ids=snake_case__ , )
lowerCamelCase_ : Dict =model(snake_case__ , attention_mask=snake_case__ )
lowerCamelCase_ : Tuple =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class lowercase__ ( snake_case__, snake_case__, unittest.TestCase ):
_UpperCAmelCase :Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
_UpperCAmelCase :str = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ : int =FlaxGPTJModelTester(self )
def UpperCAmelCase__ ( self : Optional[int] ):
for model_class_name in self.all_model_classes:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def UpperCAmelCase__ ( self : Optional[int] ):
for model_class_name in self.all_model_classes:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
@tooslow
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : Optional[int] =GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
lowerCamelCase_ : int =tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=snake_case__ , truncation=snake_case__ )
lowerCamelCase_ : Union[str, Any] =FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
lowerCamelCase_ : List[Any] =False
lowerCamelCase_ : Tuple =model.config.eos_token_id
lowerCamelCase_ : Union[str, Any] =jax.jit(model.generate )
lowerCamelCase_ : List[Any] =jit_generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences
lowerCamelCase_ : Optional[int] =tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
lowerCamelCase_ : List[str] =[
"Hello this is a long string of text.\n\nI'm trying to get the text of the",
"Hey, I'm a little late to the party. I'm going to",
]
self.assertListEqual(snake_case__ , snake_case__ )
@is_pt_flax_cross_test
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ , lowerCamelCase_ : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCamelCase_ : Any =self._prepare_for_class(snake_case__ , snake_case__ )
lowerCamelCase_ : Dict ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCamelCase_ : List[str] =model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase_ : Dict =getattr(snake_case__ , snake_case__ )
lowerCamelCase_ , lowerCamelCase_ : List[Any] =pt_inputs["input_ids"].shape
lowerCamelCase_ : Dict =np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(snake_case__ ):
lowerCamelCase_ : Dict =0
lowerCamelCase_ : Optional[Any] =1
lowerCamelCase_ : Tuple =0
lowerCamelCase_ : Optional[int] =1
lowerCamelCase_ : Union[str, Any] =pt_model_class(snake_case__ ).eval()
lowerCamelCase_ : Optional[int] =model_class(snake_case__ , dtype=jnp.floataa )
lowerCamelCase_ : Optional[int] =convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case__ )
lowerCamelCase_ : Any =fx_state
with torch.no_grad():
lowerCamelCase_ : int =pt_model(**snake_case__ ).to_tuple()
lowerCamelCase_ : List[str] =fx_model(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(snake_case__ )
lowerCamelCase_ : Union[str, Any] =model_class.from_pretrained(snake_case__ , from_pt=snake_case__ )
lowerCamelCase_ : Dict =fx_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(
len(snake_case__ ) , len(snake_case__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def UpperCAmelCase__ ( self : int ):
lowerCamelCase_ , lowerCamelCase_ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCamelCase_ : Any =self._prepare_for_class(snake_case__ , snake_case__ )
lowerCamelCase_ : Union[str, Any] ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCamelCase_ : List[Any] =model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase_ : Dict =getattr(snake_case__ , snake_case__ )
lowerCamelCase_ : int =pt_model_class(snake_case__ ).eval()
lowerCamelCase_ : List[Any] =model_class(snake_case__ , dtype=jnp.floataa )
lowerCamelCase_ : Dict =load_flax_weights_in_pytorch_model(snake_case__ , fx_model.params )
lowerCamelCase_ , lowerCamelCase_ : List[Any] =pt_inputs["input_ids"].shape
lowerCamelCase_ : Optional[int] =np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(snake_case__ ):
lowerCamelCase_ : Dict =0
lowerCamelCase_ : Optional[Any] =1
lowerCamelCase_ : Union[str, Any] =0
lowerCamelCase_ : str =1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowerCamelCase_ : Optional[Any] =pt_model(**snake_case__ ).to_tuple()
lowerCamelCase_ : List[str] =fx_model(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(snake_case__ )
lowerCamelCase_ : Optional[int] =pt_model_class.from_pretrained(snake_case__ , from_flax=snake_case__ )
with torch.no_grad():
lowerCamelCase_ : Union[str, Any] =pt_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(
len(snake_case__ ) , len(snake_case__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def UpperCAmelCase__ ( self : Any ):
for model_class_name in self.all_model_classes:
lowerCamelCase_ : Dict =model_class_name.from_pretrained("EleutherAI/gpt-j-6B" )
lowerCamelCase_ : Tuple =model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case__ )
| 244 | 1 |
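The cache tests above repeatedly build position ids by broadcasting an arange over the batch dimension. A tiny numpy sketch of that construction:

import numpy as np

batch_size, seq_len = 2, 5
position_ids = np.broadcast_to(
    np.arange(seq_len - 1)[None, :], (batch_size, seq_len - 1)
)
print(position_ids)  # two identical rows [0, 1, 2, 3]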
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : str=3 , _UpperCAmelCase : Any=18 , _UpperCAmelCase : int=30 , _UpperCAmelCase : Tuple=400 , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : int=None , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = size if size is not None else {"height": 20, "width": 20}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_convert_rgb
UpperCAmelCase_ = [512, 1024, 2048, 4096]
UpperCAmelCase_ = patch_size if patch_size is not None else {"height": 16, "width": 16}
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
UpperCAmelCase_ = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PixaStructImageProcessor if is_vision_available() else None
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = PixaStructImageProcessingTester(self )
@property
def lowercase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase_ = 2048
UpperCAmelCase_ = image_processor(_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase_ = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_UpperCAmelCase ):
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
UpperCAmelCase_ = "Hello"
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PixaStructImageProcessor if is_vision_available() else None
def lowercase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCAmelCase_ = 3
@property
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) )
def lowercase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 82 |
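The `expected_hidden_dim` computed throughout the tests above follows from how Pix2Struct flattens patches: all pixel values of one patch, with the +2 corresponding to the patch's row and column position entries. A worked check of that arithmetic:

patch_size = {"height": 16, "width": 16}
num_channels = 3
expected_hidden_dim = patch_size["height"] * patch_size["width"] * num_channels + 2
print(expected_hidden_dim)  # 770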
"""simple docstring"""
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    # Smallest positive number evenly divisible by every integer from 1 to n.
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(F"{solution() = }")
| 82 | 1 |
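Worked check of the lcm-folding approach above: for n = 10 the answer is the classic 2520, the smallest number divisible by every integer from 1 to 10.

print(solution(10))  # 2520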
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , lowercase__ ):
super().__init__()
snake_case_ : List[Any] = nn.ModuleList(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = True , ):
for i, (image, scale, controlnet) in enumerate(zip(lowercase__ , lowercase__ , self.nets ) ):
snake_case_ : str = controlnet(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , )
# merge samples
if i == 0:
snake_case_ : Tuple = down_samples, mid_sample
else:
snake_case_ : int = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowercase__ , lowercase__ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __UpperCamelCase (self , lowercase__ , lowercase__ = True , lowercase__ = None , lowercase__ = False , lowercase__ = None , ):
snake_case_ : Optional[int] = 0
snake_case_ : List[str] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowercase__ , is_main_process=lowercase__ , save_function=lowercase__ , safe_serialization=lowercase__ , variant=lowercase__ , )
idx += 1
snake_case_ : Any = model_path_to_save + f'_{idx}'
@classmethod
def __UpperCamelCase (cls , lowercase__ , **lowercase__ ):
snake_case_ : Any = 0
snake_case_ : Union[str, Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
snake_case_ : int = pretrained_model_path
while os.path.isdir(lowercase__ ):
snake_case_ : Any = ControlNetModel.from_pretrained(lowercase__ , **lowercase__ )
controlnets.append(lowercase__ )
idx += 1
snake_case_ : Dict = pretrained_model_path + f'_{idx}'
logger.info(f'{len(lowercase__ )} controlnets loaded from {pretrained_model_path}.' )
if len(lowercase__ ) == 0:
raise ValueError(
f'No ControlNets found under {os.path.dirname(lowercase__ )}. Expected at least {pretrained_model_path + "_0"}.' )
return cls(lowercase__ )
| 707 |
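A toy sketch of the residual-merging rule in the multi-controlnet forward pass above: the first net's outputs initialize the accumulators, and each subsequent net's outputs are summed in elementwise.

import torch

down_per_net = [[torch.ones(2, 2)], [torch.full((2, 2), 3.0)]]  # one down block each
mid_per_net = [torch.ones(1), torch.ones(1)]

for i, (down_samples, mid_sample) in enumerate(zip(down_per_net, mid_per_net)):
    if i == 0:
        down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
    else:
        down_block_res_samples = [
            prev + curr for prev, curr in zip(down_block_res_samples, down_samples)
        ]
        mid_block_res_sample += mid_sample

print(down_block_res_samples[0])  # tensor of 4.0s
print(mid_block_res_sample)       # tensor([2.])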
"""simple docstring"""
import argparse
import copy
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : List[Any] = {}
with open(SCREAMING_SNAKE_CASE__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
snake_case_ : int = []
_list.append([line.split()[1], line.split()[2]] )
snake_case_ : Optional[Any] = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
snake_case_ : str = []
_list.append([line.split()[0], line.split()[2]] )
snake_case_ : Optional[Any] = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE__ ) as f:
snake_case_ : Optional[Any] = f.read(1 )
snake_case_ : Union[str, Any] = start_node
snake_case_ : Dict = []
snake_case_ : Union[str, Any] = start_node
snake_case_ : Tuple = 0
while visiting not in first_solution:
snake_case_ : int = 1_0_0_0_0
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(SCREAMING_SNAKE_CASE__ ) and k[0] not in first_solution:
snake_case_ : Union[str, Any] = k[1]
snake_case_ : Any = k[0]
first_solution.append(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = distance_of_first_solution + int(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[str] = best_node
first_solution.append(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[Any] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
snake_case_ : int = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0_0_0_0
)
return first_solution, distance_of_first_solution
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : Union[str, Any] = []
for n in solution[1:-1]:
snake_case_ : str = solution.index(SCREAMING_SNAKE_CASE__ )
for kn in solution[1:-1]:
snake_case_ : Tuple = solution.index(SCREAMING_SNAKE_CASE__ )
if n == kn:
continue
snake_case_ : Optional[Any] = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
snake_case_ : int = kn
snake_case_ : Dict = n
snake_case_ : Optional[int] = 0
for k in _tmp[:-1]:
snake_case_ : Dict = _tmp[_tmp.index(SCREAMING_SNAKE_CASE__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
snake_case_ : Dict = distance + int(i[1] )
_tmp.append(SCREAMING_SNAKE_CASE__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
snake_case_ : Optional[Any] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda SCREAMING_SNAKE_CASE__ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ):
"""simple docstring"""
snake_case_ : Dict = 1
snake_case_ : List[Any] = first_solution
snake_case_ : List[Any] = []
snake_case_ : Optional[Any] = distance_of_first_solution
snake_case_ : Dict = solution
while count <= iters:
snake_case_ : List[str] = find_neighborhood(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = 0
snake_case_ : List[Any] = neighborhood[index_of_best_solution]
snake_case_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ ) - 1
snake_case_ : List[str] = False
while not found:
snake_case_ : Tuple = 0
while i < len(SCREAMING_SNAKE_CASE__ ):
if best_solution[i] != solution[i]:
snake_case_ : Optional[Any] = best_solution[i]
snake_case_ : int = solution[i]
break
snake_case_ : List[str] = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
snake_case_ : Tuple = True
snake_case_ : Dict = best_solution[:-1]
snake_case_ : Tuple = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
snake_case_ : Tuple = cost
snake_case_ : Union[str, Any] = solution
else:
snake_case_ : str = index_of_best_solution + 1
snake_case_ : Tuple = neighborhood[index_of_best_solution]
if len(SCREAMING_SNAKE_CASE__ ) >= size:
tabu_list.pop(0 )
snake_case_ : List[str] = count + 1
return best_solution_ever, best_cost
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
"""simple docstring"""
snake_case_ : Tuple = generate_neighbours(args.File )
snake_case_ , snake_case_ : Optional[Any] = generate_first_solution(
args.File , SCREAMING_SNAKE_CASE__ )
snake_case_ , snake_case_ : Dict = tabu_search(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , args.Iterations , args.Size , )
print(f'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
a_ = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 48 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : Any = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 529 |
from __future__ import annotations
def a__ ( A_, A_ ):
'''simple docstring'''
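# Extended Euclidean algorithm: returns (x, y) with a*x + b*y == gcd(a, b).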
if b == 0:
return (1, 0)
((__magic_name__) , (__magic_name__)) = extended_euclid(A_, a % b )
__magic_name__ = a // b
return (y, x - k * y)
def a__ ( A_, A_, A_, A_ ):
'''simple docstring'''
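# Chinese remainder theorem: combine n ≡ r1 (mod n1) and n ≡ r2 (mod n2) using
# the Bezout coefficients of (n1, n2); the moduli are assumed coprime, so the
# result is the unique solution modulo n1 * n2.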
((__magic_name__) , (__magic_name__)) = extended_euclid(A_, A_ )
__magic_name__ = na * na
__magic_name__ = ra * x * na + ra * y * na
return (n % m + m) % m
def a__ ( A_, A_ ):
'''simple docstring'''
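# Modular inverse of a modulo n, taken from the x-coefficient returned by the
# extended Euclidean algorithm and normalised into the range [0, n).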
((__magic_name__) , (__magic_name__)) = extended_euclid(A_, A_ )
if b < 0:
__magic_name__ = (b % n + n) % n
return b
def a__ ( A_, A_, A_, A_ ):
'''simple docstring'''
__magic_name__ , __magic_name__ = invert_modulo(A_, A_ ), invert_modulo(A_, A_ )
__magic_name__ = na * na
__magic_name__ = ra * x * na + ra * y * na
return (n % m + m) % m
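# Worked example (assuming the original signature
# chinese_remainder_theorem(n1, r1, n2, r2) named in the testmod calls below):
# solving n ≡ 1 (mod 5) and n ≡ 2 (mod 7) gives 16, since 16 % 5 == 1 and 16 % 7 == 2.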
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
| 529 | 1 |
import qiskit
def UpperCAmelCase_ ( __UpperCamelCase, __UpperCamelCase ):
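# Quantum half adder on 4 qubits: qubits 0-1 hold the input bits, two CNOTs
# write their XOR (the sum) onto qubit 2, and a Toffoli writes their AND
# (the carry) onto qubit 3; returns measurement counts over 1000 shots.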
SCREAMING_SNAKE_CASE__ =qiskit.Aer.get_backend("""aer_simulator""" )
SCREAMING_SNAKE_CASE__ =qiskit.QuantumCircuit(4, 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0, 2 )
qc_ha.cx(1, 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0, 1, 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2, 0 ) # extract XOR value
qc_ha.measure(3, 1 ) # extract AND value
# Execute the circuit on the qasm simulator
SCREAMING_SNAKE_CASE__ =qiskit.execute(__UpperCamelCase, __UpperCamelCase, shots=1_000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(__UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase_ = half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""")
| 702 |
def UpperCAmelCase_ ( __UpperCamelCase ):
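# Cocktail shaker sort: alternate a right-to-left pass (bubbling small values
# down) with a left-to-right pass (bubbling large values up), and stop early
# once a full double pass performs no swap.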
for i in range(len(__UpperCamelCase ) - 1, 0, -1 ):
SCREAMING_SNAKE_CASE__ =False
for j in range(__UpperCamelCase, 0, -1 ):
if unsorted[j] < unsorted[j - 1]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =unsorted[j - 1], unsorted[j]
SCREAMING_SNAKE_CASE__ =True
for j in range(__UpperCamelCase ):
if unsorted[j] > unsorted[j + 1]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =unsorted[j + 1], unsorted[j]
SCREAMING_SNAKE_CASE__ =True
if not swapped:
break
return unsorted
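# Example (assuming the original name cocktail_shaker_sort used in the
# __main__ block below): cocktail_shaker_sort([4, 5, 2, 1, 2]) -> [1, 2, 2, 4, 5]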
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase_ = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 588 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=lowercase_):
"""simple docstring"""
lowerCAmelCase_ = ["""torch""", """torchsde"""]
def __init__( self : List[Any] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : List[str] ) -> List[Any]:
requires_backends(self , ['''torch''', '''torchsde'''] )
@classmethod
def UpperCamelCase__ ( cls : Union[str, Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : str ) -> Tuple:
requires_backends(cls , ['''torch''', '''torchsde'''] )
@classmethod
def UpperCamelCase__ ( cls : Optional[int] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Any ) -> str:
requires_backends(cls , ['''torch''', '''torchsde'''] )
| 404 |
'''simple docstring'''
import os
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCamelCase =len(grid[0] )
_UpperCamelCase =len(__SCREAMING_SNAKE_CASE )
_UpperCamelCase =0
_UpperCamelCase =0
_UpperCamelCase =0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(__SCREAMING_SNAKE_CASE ):
for j in range(n_rows - 3 ):
_UpperCamelCase =grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
_UpperCamelCase =grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
_UpperCamelCase =(
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
_UpperCamelCase =(
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
_UpperCamelCase =max(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if max_product > largest:
_UpperCamelCase =max_product
return largest
def _a ():
"""simple docstring"""
_UpperCamelCase =[]
with open(os.path.dirname(__SCREAMING_SNAKE_CASE ) + '''/grid.txt''' ) as file:
for line in file:
grid.append(line.strip('''\n''' ).split(''' ''' ) )
_UpperCamelCase =[[int(__SCREAMING_SNAKE_CASE ) for i in grid[j]] for j in range(len(__SCREAMING_SNAKE_CASE ) )]
return largest_product(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(solution())
| 404 | 1 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , *lowercase__ , **lowercase__ ):
super().__init__(*lowercase__ , **lowercase__ )
snake_case_ : Union[str, Any] = {}
def __UpperCamelCase (self , lowercase__ , *lowercase__ , **lowercase__ ):
snake_case_ : Optional[int] = super().add_tokens(lowercase__ , *lowercase__ , **lowercase__ )
if num_added_tokens == 0:
raise ValueError(
f'The tokenizer already contains the token {placeholder_token}. Please pass a different'
""" `placeholder_token` that is not already in the tokenizer.""" )
def __UpperCamelCase (self , lowercase__ , *lowercase__ , lowercase__=1 , **lowercase__ ):
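# Register a placeholder token for textual inversion: a single token when
# num_vec_per_token == 1, otherwise the multi-vector series
# token_0 ... token_{n-1}; the full list is remembered in the token map
# for later expansion.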
snake_case_ : Optional[int] = []
if num_vec_per_token == 1:
self.try_adding_tokens(lowercase__ , *lowercase__ , **lowercase__ )
output.append(lowercase__ )
else:
snake_case_ : Dict = []
for i in range(lowercase__ ):
snake_case_ : Union[str, Any] = placeholder_token + f'_{i}'
self.try_adding_tokens(lowercase__ , *lowercase__ , **lowercase__ )
output.append(lowercase__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f'The tokenizer already has placeholder token {token} that can get confused with'
f' {placeholder_token}; keep placeholder tokens independent' )
snake_case_ : str = output
def __UpperCamelCase (self , lowercase__ , lowercase__=False , lowercase__=1.0 ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : Dict = []
for i in range(len(lowercase__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowercase__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
snake_case_ : str = self.token_map[placeholder_token]
snake_case_ : str = tokens[: 1 + int(len(lowercase__ ) * prop_tokens_to_load )]
if vector_shuffle:
snake_case_ : Optional[Any] = copy.copy(lowercase__ )
random.shuffle(lowercase__ )
snake_case_ : Optional[int] = text.replace(lowercase__ , """ """.join(lowercase__ ) )
return text
def __call__(self , lowercase__ , *lowercase__ , lowercase__=False , lowercase__=1.0 , **lowercase__ ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
lowercase__ , vector_shuffle=lowercase__ , prop_tokens_to_load=lowercase__ ) , *lowercase__ , **lowercase__ , )
def __UpperCamelCase (self , lowercase__ , *lowercase__ , lowercase__=False , lowercase__=1.0 , **lowercase__ ):
return super().encode(
self.replace_placeholder_tokens_in_text(
lowercase__ , vector_shuffle=lowercase__ , prop_tokens_to_load=lowercase__ ) , *lowercase__ , **lowercase__ , )
| 718 |
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ ):
snake_case_ : Union[str, Any] = data
snake_case_ : List[str] = [0X6_7_4_5_2_3_0_1, 0Xe_f_c_d_a_b_8_9, 0X9_8_b_a_d_c_f_e, 0X1_0_3_2_5_4_7_6, 0Xc_3_d_2_e_1_f_0]
@staticmethod
def __UpperCamelCase (lowercase__ , lowercase__ ):
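# Circular left-rotation of a 32-bit word by b bits.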
return ((n << b) | (n >> (32 - b))) & 0Xf_f_f_f_f_f_f_f
def __UpperCamelCase (self ):
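# SHA-1 preprocessing: append the 0x80 marker byte and enough zero bytes so
# that, after the 64-bit big-endian bit-length field, the total length is a
# multiple of 64 bytes.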
snake_case_ : Any = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
snake_case_ : Tuple = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def __UpperCamelCase (self ):
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = list(struct.unpack(""">16L""" , lowercase__ ) ) + [0] * 64
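# Message schedule expansion: w[i] = rotl(w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16], 1)
# extends the 16 block words to the 80 words used by the compression rounds.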
for i in range(16 , 80 ):
snake_case_ : Dict = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def __UpperCamelCase (self ):
snake_case_ : List[Any] = self.padding()
snake_case_ : Any = self.split_blocks()
for block in self.blocks:
snake_case_ : Any = self.expand_block(lowercase__ )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = self.h
for i in range(0 , 80 ):
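# Standard SHA-1 round functions and constants: Ch for rounds 0-19,
# parity for 20-39, Maj for 40-59, and parity again for 60-79.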
if 0 <= i < 20:
snake_case_ : Optional[Any] = (b & c) | ((~b) & d)
snake_case_ : List[str] = 0X5_a_8_2_7_9_9_9
elif 20 <= i < 40:
snake_case_ : Union[str, Any] = b ^ c ^ d
snake_case_ : Tuple = 0X6_e_d_9_e_b_a_1
elif 40 <= i < 60:
snake_case_ : str = (b & c) | (b & d) | (c & d)
snake_case_ : List[str] = 0X8_f_1_b_b_c_d_c
elif 60 <= i < 80:
snake_case_ : Tuple = b ^ c ^ d
snake_case_ : str = 0Xc_a_6_2_c_1_d_6
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[Any] = (
self.rotate(lowercase__ , 5 ) + f + e + k + expanded_block[i] & 0Xf_f_f_f_f_f_f_f,
a,
self.rotate(lowercase__ , 30 ),
c,
d,
)
snake_case_ : Any = (
self.h[0] + a & 0Xf_f_f_f_f_f_f_f,
self.h[1] + b & 0Xf_f_f_f_f_f_f_f,
self.h[2] + c & 0Xf_f_f_f_f_f_f_f,
self.h[3] + d & 0Xf_f_f_f_f_f_f_f,
self.h[4] + e & 0Xf_f_f_f_f_f_f_f,
)
return ("{:08x}" * 5).format(*self.h )
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Union[str, Any] = b"""Test String"""
assert SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() == hashlib.shaa(SCREAMING_SNAKE_CASE__ ).hexdigest() # noqa: S324
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : int = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
snake_case_ : Optional[int] = parser.parse_args()
snake_case_ : Optional[int] = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
snake_case_ : List[str] = f.read()
else:
snake_case_ : Dict = bytes(SCREAMING_SNAKE_CASE__ , """utf-8""" )
print(SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 48 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = 1
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCamelCase )
return image
@property
def UpperCAmelCase__ ( self : Optional[int] ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCamelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def UpperCAmelCase__ ( self : List[Any] ):
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
return CLIPTextModel(__UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.dummy_cond_unet_upscale
_UpperCAmelCase = DDPMScheduler()
_UpperCAmelCase = DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase = self.dummy_vae
_UpperCAmelCase = self.dummy_text_encoder
_UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase = StableDiffusionUpscalePipeline(
unet=__UpperCamelCase , low_res_scheduler=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , max_noise_level=350 , )
_UpperCAmelCase = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = "A painting of a squirrel eating a burger"
_UpperCAmelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
_UpperCAmelCase = sd_pipe(
[prompt] , image=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase = output.images
_UpperCAmelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
_UpperCAmelCase = sd_pipe(
[prompt] , image=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__UpperCamelCase , )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
_UpperCAmelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_UpperCAmelCase = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.dummy_cond_unet_upscale
_UpperCAmelCase = DDPMScheduler()
_UpperCAmelCase = DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase = self.dummy_vae
_UpperCAmelCase = self.dummy_text_encoder
_UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase = StableDiffusionUpscalePipeline(
unet=__UpperCamelCase , low_res_scheduler=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , max_noise_level=350 , )
_UpperCAmelCase = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = "A painting of a squirrel eating a burger"
_UpperCAmelCase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase = output.images
assert image.shape[0] == 2
_UpperCAmelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
_UpperCAmelCase = sd_pipe(
[prompt] , image=__UpperCamelCase , generator=__UpperCamelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = self.dummy_cond_unet_upscale
_UpperCAmelCase = DDPMScheduler()
_UpperCAmelCase = DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase = self.dummy_vae
_UpperCAmelCase = self.dummy_text_encoder
_UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_UpperCAmelCase = unet.half()
_UpperCAmelCase = text_encoder.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase = StableDiffusionUpscalePipeline(
unet=__UpperCamelCase , low_res_scheduler=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , max_noise_level=350 , )
_UpperCAmelCase = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = "A painting of a squirrel eating a burger"
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sd_pipe(
[prompt] , image=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=2 , output_type="np" , ).images
_UpperCAmelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
_UpperCAmelCase = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase = StableDiffusionUpscalePipeline.from_pretrained(__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
_UpperCAmelCase = "a cat sitting on a park bench"
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , generator=__UpperCamelCase , output_type="np" , )
_UpperCAmelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
_UpperCAmelCase = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase = StableDiffusionUpscalePipeline.from_pretrained(
__UpperCamelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
_UpperCAmelCase = "a cat sitting on a park bench"
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , generator=__UpperCamelCase , output_type="np" , )
_UpperCAmelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def UpperCAmelCase__ ( self : Union[str, Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
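# Reset CUDA memory statistics so the peak-memory assertion at the end
# measures only this offloaded pipeline run.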
_UpperCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase = StableDiffusionUpscalePipeline.from_pretrained(
__UpperCamelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = "a cat sitting on a park bench"
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=5 , output_type="np" , )
_UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 684 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
return getitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
return setitem, k, v
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
return delitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> Optional[int]:
try:
return fun(_lowerCAmelCase , *_lowerCAmelCase ), None
except Exception as e:
return None, e
__lowerCAmelCase = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__lowerCAmelCase = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
_UpperCAmelCase = HashMap(initial_block_size=4 )
_UpperCAmelCase = {}
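# Replay the same operation sequence against our HashMap and a builtin dict,
# requiring identical results, string forms, key sets, lengths, and items.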
for _, (fun, *args) in enumerate(_lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
assert my_res == py_res
assert str(_lowerCAmelCase ) == str(_lowerCAmelCase )
assert set(_lowerCAmelCase ) == set(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
assert set(my.items() ) == set(py.items() )
def __lowerCamelCase ( ) -> List[Any]:
def is_public(_lowerCAmelCase ) -> bool:
return not name.startswith("_" )
_UpperCAmelCase = {name for name in dir({} ) if is_public(_lowerCAmelCase )}
_UpperCAmelCase = {name for name in dir(HashMap() ) if is_public(_lowerCAmelCase )}
assert dict_public_names > hash_public_names
| 684 | 1 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def lowercase__(A , A , A ) ->List[Any]:
"""simple docstring"""
lowercase__ : Any= 0
if start < end:
lowercase__ : Optional[int]= randint(A , A )
lowercase__ : Any= a[end]
lowercase__ : List[str]= a[pivot]
lowercase__ : Dict= temp
lowercase__, lowercase__ : Tuple= _in_place_partition(A , A , A )
count += _in_place_quick_sort(A , A , p - 1 )
count += _in_place_quick_sort(A , p + 1 , A )
return count
def lowercase__(A , A , A ) ->Tuple:
"""simple docstring"""
lowercase__ : int= 0
lowercase__ : Optional[Any]= randint(A , A )
lowercase__ : Union[str, Any]= a[end]
lowercase__ : int= a[pivot]
lowercase__ : Tuple= temp
lowercase__ : Tuple= start - 1
for index in range(A , A ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowercase__ : Dict= new_pivot_index + 1
lowercase__ : Optional[int]= a[new_pivot_index]
lowercase__ : Dict= a[index]
lowercase__ : Tuple= temp
lowercase__ : str= a[new_pivot_index + 1]
lowercase__ : Dict= a[end]
lowercase__ : List[str]= temp
return new_pivot_index + 1, count
a : Optional[int] = TemporaryFile()
a : int = 100 # 100 elements are to be sorted
a , a : Tuple = 0, 1 # mean and standard deviation
a : int = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
a : int = np.load(outfile)
a : str = len(M) - 1
a : Union[str, Any] = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 85 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 85 | 1 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
snake_case_ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class SCREAMING_SNAKE_CASE__ (nn.Module ):
def __init__( self , a):
super().__init__()
lowercase__ : int = torchvision.models.resnetaaa(pretrained=__UpperCamelCase)
lowercase__ : Tuple = list(model.children())[:-2]
lowercase__ : List[str] = nn.Sequential(*__UpperCamelCase)
lowercase__ : Dict = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds])
def snake_case_ ( self , a):
lowercase__ : int = self.pool(self.model(__UpperCamelCase))
lowercase__ : Any = torch.flatten(__UpperCamelCase , start_dim=2)
lowercase__ : str = out.transpose(1 , 2).contiguous()
return out # BxNx2048
class SCREAMING_SNAKE_CASE__ (_UpperCAmelCase ):
def __init__( self , a , a , a , a , a):
lowercase__ : Union[str, Any] = [json.loads(__UpperCamelCase) for l in open(__UpperCamelCase)]
lowercase__ : List[str] = os.path.dirname(__UpperCamelCase)
lowercase__ : str = tokenizer
lowercase__ : Any = labels
lowercase__ : Any = len(__UpperCamelCase)
lowercase__ : str = max_seq_length
lowercase__ : str = transforms
def __len__( self):
return len(self.data)
def __getitem__( self , a):
lowercase__ : int = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=__UpperCamelCase))
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = sentence[0], sentence[1:-1], sentence[-1]
lowercase__ : Optional[int] = sentence[: self.max_seq_length]
lowercase__ : List[str] = torch.zeros(self.n_classes)
lowercase__ : Optional[int] = 1
lowercase__ : List[Any] = Image.open(os.path.join(self.data_dir , self.data[index]['img'])).convert('RGB')
lowercase__ : List[str] = self.transforms(__UpperCamelCase)
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def snake_case_ ( self):
lowercase__ : Tuple = Counter()
for row in self.data:
label_freqs.update(row['label'])
return label_freqs
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Any ):
'''simple docstring'''
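# Collate a multimodal batch: pad every tokenized sentence to the longest in
# the batch, build the matching attention mask, and stack the image, label,
# and image start/end token tensors.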
lowercase__ : Tuple = [len(row['sentence'] ) for row in batch]
lowercase__ , lowercase__ : Any = len(_A ), max(_A )
lowercase__ : Any = torch.zeros(_A , _A , dtype=torch.long )
lowercase__ : List[str] = torch.zeros(_A , _A , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_A , _A ) ):
lowercase__ : int = input_row['sentence']
lowercase__ : int = 1
lowercase__ : Any = torch.stack([row['image'] for row in batch] )
lowercase__ : int = torch.stack([row['label'] for row in batch] )
lowercase__ : Tuple = torch.stack([row['image_start_token'] for row in batch] )
lowercase__ : int = torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def snake_case__ ( ):
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def snake_case__ ( ):
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
] )
| 164 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
a : List[str] = logging.get_logger(__name__)
class a_ :
def __init__( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Tuple ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = question_encoder
_UpperCAmelCase = generator
_UpperCAmelCase = self.question_encoder
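# The question-encoder tokenizer is active by default; the two no-argument
# methods further down switch the active tokenizer between encoder and generator.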
def _snake_case ( self : int , __UpperCamelCase : str ) ->Dict:
'''simple docstring'''
if os.path.isfile(__UpperCamelCase ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
_UpperCAmelCase = os.path.join(__UpperCamelCase , """question_encoder_tokenizer""" )
_UpperCAmelCase = os.path.join(__UpperCamelCase , """generator_tokenizer""" )
self.question_encoder.save_pretrained(__UpperCamelCase )
self.generator.save_pretrained(__UpperCamelCase )
@classmethod
def _snake_case ( cls : Optional[Any] , __UpperCamelCase : Tuple , **__UpperCamelCase : Optional[Any] ) ->Tuple:
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
_UpperCAmelCase = kwargs.pop("""config""" , __UpperCamelCase )
if config is None:
_UpperCAmelCase = RagConfig.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained(
__UpperCamelCase , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
_UpperCAmelCase = AutoTokenizer.from_pretrained(
__UpperCamelCase , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=__UpperCamelCase , generator=__UpperCamelCase )
def __call__( self : Any , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
return self.current_tokenizer(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Union[str, Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Tuple ) ->Any:
'''simple docstring'''
return self.generator.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Optional[int] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[str] ) ->Union[str, Any]:
'''simple docstring'''
return self.generator.decode(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.question_encoder
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.generator
def _snake_case ( self : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[List[str]] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : str = "longest" , __UpperCamelCase : str = None , __UpperCamelCase : bool = True , **__UpperCamelCase : Tuple , ) ->BatchEncoding:
'''simple docstring'''
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , __UpperCamelCase , )
if max_length is None:
_UpperCAmelCase = self.current_tokenizer.model_max_length
_UpperCAmelCase = self(
__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_tensors=__UpperCamelCase , max_length=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , **__UpperCamelCase , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_UpperCAmelCase = self.current_tokenizer.model_max_length
_UpperCAmelCase = self(
text_target=__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_tensors=__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = labels["""input_ids"""]
return model_inputs
| 555 | 0 |
def __lowercase ( _A ) -> list[int]:
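# Z-function: z[i] is the length of the longest substring starting at i that
# matches a prefix of the input; computed in O(n) by maintaining the rightmost
# matching window [left_pointer, right_pointer].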
SCREAMING_SNAKE_CASE : int = [0 for i in range(len(_A ) )]
# initialize interval's left pointer and right pointer
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = 0, 0
for i in range(1 , len(_A ) ):
# case when current index is inside the interval
if i <= right_pointer:
SCREAMING_SNAKE_CASE : Any = min(right_pointer - i + 1 , z_result[i - left_pointer] )
SCREAMING_SNAKE_CASE : Optional[Any] = min_edge
while go_next(_A , _A , _A ):
z_result[i] += 1
# if the new index's result extends the interval further right,
# we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = i, i + z_result[i] - 1
return z_result
def __lowercase ( _A , _A , _A ) -> bool:
return i + z_result[i] < len(_A ) and s[z_result[i]] == s[i + z_result[i]]
def __lowercase ( _A , _A ) -> int:
SCREAMING_SNAKE_CASE : Any = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
SCREAMING_SNAKE_CASE : List[Any] = z_function(pattern + input_str )
for val in z_result:
# if the value is at least the length of the pattern string,
# this index is the starting position of a substring
# equal to the pattern string
if val >= len(_A ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
| 446 |
def __lowercase ( _A = 50 ) -> int:
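# Dynamic-programming count of the ways to fill a row of `length` units with
# blocks of length at least 3 separated by at least one empty cell
# (Project Euler problem 114).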
SCREAMING_SNAKE_CASE : Tuple = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 446 | 1 |
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[list[int]] ):
"""simple docstring"""
snake_case_ : Tuple = len(SCREAMING_SNAKE_CASE__ )
# We need to create solution object to save path.
snake_case_ : int = [[0 for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )]
snake_case_ : Union[str, Any] = run_maze(SCREAMING_SNAKE_CASE__ , 0 , 0 , SCREAMING_SNAKE_CASE__ )
if solved:
print("""\n""".join(str(SCREAMING_SNAKE_CASE__ ) for row in solutions ) )
else:
print("""No solution exists!""" )
return solved
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[list[int]] ):
"""simple docstring"""
snake_case_ : int = len(SCREAMING_SNAKE_CASE__ )
# Final check point.
if i == j == (size - 1):
snake_case_ : Optional[int] = 1
return True
snake_case_ : Optional[Any] = (not i < 0) and (not j < 0) # Check lower bounds
snake_case_ : int = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
snake_case_ : Tuple = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
snake_case_ : List[str] = 1
# check for directions
if (
run_maze(SCREAMING_SNAKE_CASE__ , i + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
or run_maze(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , j + 1 , SCREAMING_SNAKE_CASE__ )
or run_maze(SCREAMING_SNAKE_CASE__ , i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
or run_maze(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , j - 1 , SCREAMING_SNAKE_CASE__ )
):
return True
snake_case_ : Tuple = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 480 |
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class __lowercase :
"""simple docstring"""
_A : float
_A : TreeNode | None = None
_A : TreeNode | None = None
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : TreeNode | None ):
"""simple docstring"""
def is_valid_tree(SCREAMING_SNAKE_CASE__ : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
"""Each node should be type of TreeNode and data should be float.""" )
def is_binary_search_tree_recursive_check(
SCREAMING_SNAKE_CASE__ : TreeNode | None , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , SCREAMING_SNAKE_CASE__ , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , SCREAMING_SNAKE_CASE__ )
)
return is_binary_search_tree_recursive_check(SCREAMING_SNAKE_CASE__ , -float("""inf""" ) , float("""inf""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 480 | 1 |
import sys
import turtle
def lowerCAmelCase__ ( a__ , a__ ) ->tuple[float, float]:
'''simple docstring'''
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ , ) ->None:
'''simple docstring'''
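# Draw the current triangle, then recurse on the three corner sub-triangles
# formed by the edge midpoints until the requested depth is exhausted.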
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(a__ , get_mid(a__ , a__ ) , get_mid(a__ , a__ ) , depth - 1 )
triangle(a__ , get_mid(a__ , a__ ) , get_mid(a__ , a__ ) , depth - 1 )
triangle(a__ , get_mid(a__ , a__ ) , get_mid(a__ , a__ ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
lowerCamelCase__ = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
lowerCamelCase__ = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 82 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''dpr'''
def __init__( self : Optional[Any] , lowercase_ : int=30522 , lowercase_ : str=768 , lowercase_ : List[Any]=12 , lowercase_ : Dict=12 , lowercase_ : str=3072 , lowercase_ : Any="gelu" , lowercase_ : Any=0.1 , lowercase_ : Any=0.1 , lowercase_ : str=512 , lowercase_ : str=2 , lowercase_ : List[Any]=0.02 , lowercase_ : Dict=1e-1_2 , lowercase_ : List[str]=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : int = 0 , **lowercase_ : int , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , **lowercase_)
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = projection_dim
_UpperCamelCase = position_embedding_type
| 82 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
SCREAMING_SNAKE_CASE = 16
SCREAMING_SNAKE_CASE = 32
def _lowerCamelCase ( __A : Accelerator , __A : int = 16 , __A : str = "bert-base-cased" ) -> List[Any]:
_UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(__A )
_UpperCAmelCase : Union[str, Any] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__A : str ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase : Dict = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__A , max_length=__A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCAmelCase : Optional[int] = datasets.map(
__A , batched=__A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=__A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase : str = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__A : List[str] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__A , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(__A , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
_UpperCAmelCase : List[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__A , collate_fn=__A , batch_size=__A )
_UpperCAmelCase : Tuple = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__A , collate_fn=__A , batch_size=__A )
return train_dataloader, eval_dataloader
def _lowerCamelCase ( __A : Tuple , __A : int , __A : Dict , __A : List[str] ) -> List[str]:
model.eval()
_UpperCAmelCase : Dict = 0
for step, batch in enumerate(__A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase : int = model(**__A )
_UpperCAmelCase : Any = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_UpperCAmelCase , _UpperCAmelCase : Tuple = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__A ) - 1:
_UpperCAmelCase : Union[str, Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_UpperCAmelCase : Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__A , references=__A , )
_UpperCAmelCase : int = metric.compute()
return eval_metric["accuracy"]
def _lowerCamelCase ( __A : Any , __A : Optional[Any] ) -> Tuple:
# Initialize accelerator
_UpperCAmelCase : Any = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase : List[str] = config['''lr''']
_UpperCAmelCase : Dict = int(config['''num_epochs'''] )
_UpperCAmelCase : int = int(config['''seed'''] )
_UpperCAmelCase : str = int(config['''batch_size'''] )
_UpperCAmelCase : Union[str, Any] = args.model_name_or_path
set_seed(__A )
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = get_dataloaders(__A , __A , __A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase : List[str] = AutoModelForSequenceClassification.from_pretrained(__A , return_dict=__A )
# Instantiate optimizer
_UpperCAmelCase : Union[str, Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_UpperCAmelCase : str = optimizer_cls(params=model.parameters() , lr=__A )
if accelerator.state.deepspeed_plugin is not None:
_UpperCAmelCase : Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
_UpperCAmelCase : Optional[Any] = 1
_UpperCAmelCase : str = (len(__A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCAmelCase : Dict = get_linear_schedule_with_warmup(
optimizer=__A , num_warmup_steps=0 , num_training_steps=__A , )
else:
_UpperCAmelCase : int = DummyScheduler(__A , total_num_steps=__A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = accelerator.prepare(
__A , __A , __A , __A , __A )
# We need to keep track of how many total steps we have iterated over
_UpperCAmelCase : Union[str, Any] = 0
# We also need to keep track of the starting epoch so files are named properly
_UpperCAmelCase : Tuple = 0
_UpperCAmelCase : str = evaluate.load('''glue''' , '''mrpc''' )
_UpperCAmelCase : Dict = num_epochs
if args.partial_train_epoch is not None:
_UpperCAmelCase : Optional[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
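# Restore model/optimizer/scheduler state, then recover the starting epoch
# number from the digits in the checkpoint folder name (e.g. "epoch_3").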
accelerator.load_state(args.resume_from_checkpoint )
_UpperCAmelCase : List[Any] = args.resume_from_checkpoint.split('''epoch_''' )[1]
_UpperCAmelCase : Optional[Any] = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
_UpperCAmelCase : Union[str, Any] = int(__A ) + 1
_UpperCAmelCase : Any = evaluation_loop(__A , __A , __A , __A )
accelerator.print('''resumed checkpoint performance:''' , __A )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'''state_{starting_epoch-1}.json''' ) , '''r''' ) as f:
_UpperCAmelCase : int = json.load(__A )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
_UpperCAmelCase : List[str] = {}
for epoch in range(__A , __A ):
model.train()
for step, batch in enumerate(__A ):
_UpperCAmelCase : List[str] = model(**__A )
_UpperCAmelCase : str = outputs.loss
_UpperCAmelCase : Any = loss / gradient_accumulation_steps
accelerator.backward(__A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_UpperCAmelCase : Any = f'''epoch_{epoch}'''
_UpperCAmelCase : List[Any] = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
_UpperCAmelCase : str = evaluation_loop(__A , __A , __A , __A )
_UpperCAmelCase : Any = accuracy
_UpperCAmelCase : int = lr_scheduler.get_lr()[0]
_UpperCAmelCase : Any = optimizer.param_groups[0]['''lr''']
_UpperCAmelCase : List[str] = epoch
_UpperCAmelCase : Optional[int] = overall_step
accelerator.print(f'''epoch {epoch}:''' , __A )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'''state_{epoch}.json''' ) , '''w''' ) as f:
json.dump(__A , __A )
def _lowerCamelCase ( ) -> Optional[Any]:
_UpperCAmelCase : Tuple = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=__A , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=__A , )
parser.add_argument(
'''--output_dir''' , type=__A , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=__A , default=__A , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=__A , default=__A , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=__A , default=2 , help='''Number of train epochs.''' , )
_UpperCAmelCase : Optional[Any] = parser.parse_args()
_UpperCAmelCase : Optional[int] = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(__A , __A )
if __name__ == "__main__":
main()
| 485 |
def _lowerCamelCase ( __A : float ) -> float:
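# Surface area of a regular dodecahedron: 3 * sqrt(25 + 10 * sqrt(5)) * edge**2.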
if edge <= 0 or not isinstance(__A , __A ):
raise ValueError('''Length must be positive.''' )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def _lowerCamelCase ( __A : float ) -> float:
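# Volume of a regular dodecahedron: (15 + 7 * sqrt(5)) / 4 * edge**3.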
if edge <= 0 or not isinstance(__A , __A ):
raise ValueError('''Length must be positive.''' )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 485 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig( datasets.BuilderConfig ):
    '''simple docstring'''
    features: Optional[datasets.Features] = None
class Pandas( datasets.ArrowBasedBuilder ):
    '''simple docstring'''
    BUILDER_CONFIG_CLASS = PandasConfig
    def _info( self ) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files ,(str, list, tuple) ):
            files = data_files
            if isinstance(files ,str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'''files''': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files ,str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name ,gen_kwargs={'''files''': files} ) )
        return splits
    def _cast_table( self , pa_table: pa.Table ) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table ,self.config.features.arrow_schema )
        return pa_table
    def _generate_tables( self , files ):
        for i, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file ,'''rb''' ) as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f ) )
            yield i, self._cast_table(pa_table )
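# Usage sketch via the `datasets` library, assuming this builder is registered as
# the packaged "pandas" module (the data file path is a placeholder):
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files="data.pkl", split="train")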
| 718 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __A :
'''simple docstring'''
def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , backbone_featmap_shape=[1, 16, 4, 4] , scope=None , ) -> None:
    """simple docstring"""
    self.parent = parent
    self.batch_size = batch_size
    self.image_size = image_size
    self.patch_size = patch_size
    self.num_channels = num_channels
    self.is_training = is_training
    self.use_labels = use_labels
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.scope = scope
    self.backbone_featmap_shape = backbone_featmap_shape
    # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
    # the number of patches is based on the feature map of the backbone, which by default uses an output stride
    # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
    num_patches = (self.image_size // 32) ** 2
    self.seq_length = num_patches + 1
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : List[str] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
lowercase__ : str = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_snake_case ,initializer_range=self.initializer_range ,backbone_featmap_shape=self.backbone_featmap_shape ,backbone_config=_snake_case ,)
def UpperCAmelCase ( self : int ,_snake_case : Dict ,_snake_case : str ,_snake_case : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = ViTHybridModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : Dict ,_snake_case : Dict ) -> List[str]:
"""simple docstring"""
lowercase__ : List[str] = self.type_sequence_label_size
lowercase__ : str = ViTHybridForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : List[str] = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Any = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : List[Any] = config_and_inputs
lowercase__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowerCAmelCase : Optional[int] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Any = False
lowerCAmelCase : Optional[int] = False
def UpperCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
lowercase__ : str = ViTHybridModelTester(self )
lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case ,hidden_size=37 )
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowercase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) )
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(_snake_case )
lowercase__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : str = [*signature.parameters.keys()]
lowercase__ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : Any ) -> str:
"""simple docstring"""
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[Any] = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
lowercase__ : Dict = model_class(config=_snake_case )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowercase__ : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[int] = ViTHybridModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def prepare_img( ):
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[str] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_snake_case )
lowercase__ : Union[str, Any] = self.default_image_processor
lowercase__ : Any = prepare_img()
lowercase__ : Optional[Any] = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowercase__ : Optional[int] = model(**_snake_case )
# verify the logits
lowercase__ : Any = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : str = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
@slow
@require_accelerate
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Dict = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
lowercase__ : Dict = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' ,device_map='''auto''' )
lowercase__ : Optional[int] = prepare_img()
lowercase__ : List[str] = image_processor(images=_snake_case ,return_tensors='''pt''' )
lowercase__ : Union[str, Any] = model(**_snake_case )
lowercase__ : List[str] = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowercase__ : List[str] = logits.argmax(-1 ).item()
self.assertEqual(model.config.id2label[predicted_class_idx] , '''tabby, tabby cat''' )
| 122 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''feature_extraction_yolos'''] = ['''YolosFeatureExtractor''']
_import_structure['''image_processing_yolos'''] = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_yolos'''] = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 47 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__( self , start , end , val , left=None , right=None ):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
    def __repr__( self ):
        return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class SegmentTree:
    def __init__( self , collection: Sequence , function ):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0 , len(collection ) - 1 )
    def update( self , i , val ):
        self._update_tree(self.root , i , val )
    def query_range( self , i , j ):
        return self._query_range(self.root , i , j )
    def _build_tree( self , start , end ):
        if start == end:
            return SegmentTreeNode(start , end , self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start , mid )
        right = self._build_tree(mid + 1 , end )
        return SegmentTreeNode(start , end , self.fn(left.val , right.val ) , left , right )
    def _update_tree( self , node , i , val ):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left , i , val )
        else:
            self._update_tree(node.right , i , val )
        node.val = self.fn(node.left.val , node.right.val )
    def _query_range( self , node , i , j ):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left , i , j )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left , i , node.mid ) , self._query_range(node.right , node.mid + 1 , j ) , )
        else:
            # range in right child tree
            return self._query_range(node.right , i , j )
    def traverse( self ):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
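# The tree works with any associative binary function, not just the ones above;
# a minimal extra example (string concatenation):
#   st = SegmentTree(["a", "b", "c"], lambda x, y: x + y)
#   st.query_range(0, 2)  # "abc"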
| 47 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_biogpt'] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 615 |
"""simple docstring"""
import string
from math import log10
def term_frequency( term: str , document: str ) -> int:
    """Return how many times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("" ,"" ,string.punctuation ) ).replace("\n" ,"" )
    tokenize_document = document_without_punctuation.split(" " ) # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency( term: str , corpus: str ) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents);
    documents in `corpus` are separated by newlines."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("" ,"" ,string.punctuation ) ) # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency( df: int , n: int , smoothing: bool = False ) -> float:
    """Return idf = log10(n / df), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined." )
        return round(1 + log10(n / (1 + df) ) ,3 )
    if df == 0:
        raise ZeroDivisionError("df must be > 0" )
    elif n == 0:
        raise ValueError("log10(0) is undefined." )
    return round(log10(n / df ) ,3 )
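# Worked example for the three functions above, using a hypothetical two-line corpus:
#   term_frequency("to", "To be, or not to be")                   # 2
#   df, n = document_frequency("first", "First line\nSecond line") # (1, 2)
#   inverse_document_frequency(df, n)                              # 0.301 == round(log10(2 / 1), 3)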
def tf_idf( tf: int , idf: float ) -> float:
    """Return the product of term frequency and inverse document frequency."""
    return round(tf * idf ,3 )
| 615 | 1 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class EfficientNetConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''efficientnet'''
    def __init__( self , num_channels: int = 3 , image_size: int = 600 , width_coefficient: float = 2.0 , depth_coefficient: float = 3.1 , depth_divisor: int = 8 , kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3] , in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192] , out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding: List[int] = [] , strides: List[int] = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1] , expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio: float = 0.25 , hidden_act: str = "swish" , hidden_dim: int = 2560 , pooling_type: str = "mean" , initializer_range: float = 0.02 , batch_norm_eps: float = 0.001 , batch_norm_momentum: float = 0.99 , dropout_rate: float = 0.5 , drop_connect_rate: float = 0.2 , **kwargs , ) -> None:
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])
    @property
    def atol_for_validation( self ) -> float:
        return 1e-5
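# Minimal usage sketch for the config above (class name restored from upstream
# transformers as an assumption):
#   config = EfficientNetConfig(image_size=600, width_coefficient=2.0)
#   config.num_hidden_layers  # sum(num_block_repeats) * 4 == 64 with the defaults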
| 547 |
def get_1s_count( number: int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ) or number < 0:
        raise ValueError("Input must be a non-negative integer" )
    count = 0
    while number:
        # Brian Kernighan's trick: `number &= number - 1` clears the lowest set bit,
        # so the loop runs once per set bit instead of once per bit position
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
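# Trace for 25 (0b11001), showing why the loop runs once per set bit:
#   25 & 24 = 24 (0b11000) -> 24 & 23 = 16 (0b10000) -> 16 & 15 = 0, so count == 3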
| 547 | 1 |
"""simple docstring"""
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 544 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _lowerCamelCase :
pass
| 544 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Tuple = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''megatron-bert'''
    def __init__( self , vocab_size=29_056 , hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4_096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
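# Minimal usage sketch (class name restored as an assumption):
#   config = MegatronBertConfig(num_hidden_layers=12)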
| 265 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
A_ : int = get_tests_dir('fixtures')
A_ : int = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
A_ : Dict = get_tests_dir('fixtures/dummy-config.json')
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = 0
def _lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : List[Any] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
SCREAMING_SNAKE_CASE : Optional[Any] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE ).to_dict()
config_dict.pop('feature_extractor_type' )
SCREAMING_SNAKE_CASE : int = WavaVecaFeatureExtractor(**_SCREAMING_SNAKE_CASE )
# save in new folder
model_config.save_pretrained(_SCREAMING_SNAKE_CASE )
config.save_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE : Optional[int] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , 'bert-base is not a local folder and is not a valid model identifier' ):
SCREAMING_SNAKE_CASE : str = AutoFeatureExtractor.from_pretrained('bert-base' )
def _lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE , revision='aaaaaa' )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
SCREAMING_SNAKE_CASE : int = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def _lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : int = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def _lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
try:
AutoConfig.register('custom' , _SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE : Union[str, Any] = CustomFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Dict = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def _lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
class lowerCAmelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : str = True
try:
AutoConfig.register('custom' , _SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE : str = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE : List[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(_SCREAMING_SNAKE_CASE , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 265 | 1 |
'''simple docstring'''
def bfs( graph: list , source: int , sink: int , parent: list ) -> bool:
    # Return True if there is an augmenting path from source to sink
    visited = [False] * len(graph )
    queue = []
    queue.append(source )
    visited[source] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson( graph: list , source: int , sink: int ) -> int:
    # This array is filled by BFS and used to recover each augmenting path
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph , source , sink , parent ):
        path_flow = float("""Inf""" )
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source , sink = 0, 5
print(ford_fulkerson(graph, source, sink))
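# For the classic CLRS example network above, this prints 23
# (augmenting paths found by BFS, i.e. the Edmonds-Karp variant of Ford-Fulkerson).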
| 162 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def dijkstra( grid: np.ndarray , source: tuple[int, int] , destination: tuple[int, int] , allow_diagonal: bool , ) -> tuple[float, list[tuple[int, int]]]:
    # Dijkstra on a binary grid with unit edge weights; cells equal to 1 are walkable
    rows , cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue , visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None
    while queue:
        (dist , (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x , y = predecessors[x, y]
            path.append(source ) # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx , ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
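# Usage sketch for the function above (name restored as an assumption); cells
# equal to 1 are walkable:
#   grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
#   dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#   dist  # 4.0 -- four unit steps along the top row and right column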
| 162 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self : Any ) -> Dict:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE_ = TFAutoModel.from_pretrained(snake_case_ , from_pt=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE_ = AutoModel.from_pretrained(snake_case_ , from_tf=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def _lowercase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE_ = TFAutoModelForPreTraining.from_pretrained(snake_case_ , from_pt=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE_ = AutoModelForPreTraining.from_pretrained(snake_case_ , from_tf=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def _lowercase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE_ = TFAutoModelForCausalLM.from_pretrained(snake_case_ , from_pt=snake_case_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = TFAutoModelForCausalLM.from_pretrained(
snake_case_ , output_loading_info=snake_case_ , from_pt=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE_ = AutoModelForCausalLM.from_pretrained(snake_case_ , from_tf=snake_case_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = AutoModelForCausalLM.from_pretrained(
snake_case_ , output_loading_info=snake_case_ , from_tf=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def _lowercase ( self : Dict ) -> Dict:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE_ = TFAutoModelWithLMHead.from_pretrained(snake_case_ , from_pt=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE_ = AutoModelWithLMHead.from_pretrained(snake_case_ , from_tf=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def _lowercase ( self : Any ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE_ = TFAutoModelForMaskedLM.from_pretrained(snake_case_ , from_pt=snake_case_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = TFAutoModelForMaskedLM.from_pretrained(
snake_case_ , output_loading_info=snake_case_ , from_pt=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE_ = AutoModelForMaskedLM.from_pretrained(snake_case_ , from_tf=snake_case_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = AutoModelForMaskedLM.from_pretrained(
snake_case_ , output_loading_info=snake_case_ , from_tf=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def _lowercase ( self : List[Any] ) -> int:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE_ = TFAutoModelForSeqaSeqLM.from_pretrained(snake_case_ , from_pt=snake_case_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = TFAutoModelForSeqaSeqLM.from_pretrained(
snake_case_ , output_loading_info=snake_case_ , from_pt=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE_ = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ , from_tf=snake_case_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = AutoModelForSeqaSeqLM.from_pretrained(
snake_case_ , output_loading_info=snake_case_ , from_tf=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def _lowercase ( self : Any ) -> Dict:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE_ = TFAutoModelForSequenceClassification.from_pretrained(snake_case_ , from_pt=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE_ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , from_tf=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def _lowercase ( self : Any ) -> Dict:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE_ = TFAutoModelForQuestionAnswering.from_pretrained(snake_case_ , from_pt=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE_ = AutoModelForQuestionAnswering.from_pretrained(snake_case_ , from_tf=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
def _lowercase ( self : List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TFAutoModelWithLMHead.from_pretrained(snake_case_ , from_pt=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=snake_case_ ) , 14_410 )
SCREAMING_SNAKE_CASE_ = AutoModelWithLMHead.from_pretrained(snake_case_ , from_tf=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=snake_case_ ) , 14_410 )
def _lowercase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TFAutoModelWithLMHead.from_pretrained(snake_case_ , from_pt=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=snake_case_ ) , 14_410 )
SCREAMING_SNAKE_CASE_ = AutoModelWithLMHead.from_pretrained(snake_case_ , from_tf=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=snake_case_ ) , 14_410 )
| 393 |
"""simple docstring"""
def binary_or( a: int , b: int ) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must not be negative" )
    a_binary = str(bin(a ) )[2:] # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int("1" in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
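# Example, assuming the restored name binary_or:
#   binary_or(25, 32)  # '0b111001' (0b011001 | 0b100000)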
| 163 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''tokenization_bertweet''': ['''BertweetTokenizer''']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 498 |
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm( number: int ) -> int:
    if number < 0:
        raise ValueError("""the value of input must not be negative""" )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator( number: int ) -> int:
    if number < 0:
        raise ValueError("""the value of input must not be negative""" )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark( ) -> None:
    def do_benchmark( number: int ) -> None:
        setup = """import __main__ as z"""
        print(f'Benchmark when {number = }:' )
        print(f'{get_set_bits_count_using_modulo_operator(number ) = }' )
        timing = timeit(f"""z.get_set_bits_count_using_modulo_operator({number})""" , setup=setup )
        print(f'timeit() runs in {timing} seconds' )
        print(f'{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }' )
        timing = timeit(
            f"""z.get_set_bits_count_using_brian_kernighans_algorithm({number})""" , setup=setup , )
        print(f'timeit() runs in {timing} seconds' )
    for number in (2_5, 3_7, 5_8, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
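# Note on the benchmark above: the Brian Kernighan version loops once per set bit
# (O(popcount)), while the modulo version inspects every bit (O(number.bit_length())),
# so the gap widens for sparse bit patterns in wide integers.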
| 498 | 1 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = """The Nymphenburg Palace is a beautiful palace in Munich!"""
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str , pytorch_dump_folder_path: str ) -> None:
    """Convert an original Bort (GluonNLP) checkpoint to a Transformers PyTorch checkpoint."""
    bort_4_8_768_1024_hparams = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1_024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1_024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
encoder = BERTEncoder(
    attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
vocab_name = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
gluon_cache_dir = os.path.join(get_home_dir() , '''models''' )
vocab = _load_vocab(vocab_name , None , gluon_cache_dir , cls=Vocab )
original_bort = nlp.model.BERTModel(
    encoder , len(vocab ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=False , use_decoder=False , )
original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
params = original_bort._collect_params_with_prefix()
# Build our config 🤗
hf_bort_config_json = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(vocab ),
}
hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(mx_array ) -> nn.Parameter:
    return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(hf_param , gluon_param ):
    shape_hf = hf_param.shape
    gluon_param = to_torch(params[gluon_param] )
    shape_gluon = gluon_param.shape
    assert (
        shape_hf == shape_gluon
    ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
    return gluon_param
hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
    hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
    hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
    hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
    hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
    layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
    # self attention
    self_attn: BertSelfAttention = layer.attention.self
    self_attn.key.bias.data = check_and_map_params(
        self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
    self_attn.key.weight.data = check_and_map_params(
        self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
    self_attn.query.bias.data = check_and_map_params(
        self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
    self_attn.query.weight.data = check_and_map_params(
        self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
    self_attn.value.bias.data = check_and_map_params(
        self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
    self_attn.value.weight.data = check_and_map_params(
        self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
    # self attention output
    self_output: BertSelfOutput = layer.attention.output
    self_output.dense.bias = check_and_map_params(
        self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" )
    self_output.dense.weight = check_and_map_params(
        self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" )
    self_output.LayerNorm.bias = check_and_map_params(
        self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" )
    self_output.LayerNorm.weight = check_and_map_params(
        self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
    # intermediate
    intermediate: BertIntermediate = layer.intermediate
    intermediate.dense.bias = check_and_map_params(
        intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
    intermediate.dense.weight = check_and_map_params(
        intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
    # output
    bert_output: BertOutput = layer.output
    bert_output.dense.bias = check_and_map_params(
        bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
    bert_output.dense.weight = check_and_map_params(
        bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
    bert_output.LayerNorm.bias = check_and_map_params(
        bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
    bert_output.LayerNorm.weight = check_and_map_params(
        bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
tokenizer = RobertaTokenizer.from_pretrained('''roberta-base''' )
input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['''input_ids''']
# Get gluon output
gluon_input_ids = mx.nd.array([input_ids] )
output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowercase_ )
A__ = BertModel.from_pretrained(lowercase_ )
hf_bort_model.eval()
A__ = tokenizer.encode_plus(lowercase_ , return_tensors='''pt''' )
A__ = hf_bort_model(**lowercase_ )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)
    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
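# Hypothetical invocation (script name and both paths are placeholders, not from
# the original file):
#
#   python convert_bort_checkpoint.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch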
| 87 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
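# What the lazy indirection buys (illustrative note, not part of this file):
# importing the package stays cheap because the modeling submodule is only
# loaded on first attribute access, e.g.
#
#   from transformers import GPTNeoXJapaneseConfig  # triggers the lazy load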
| 284 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard and enter."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            # Colorize the selected choice green on non-Windows terminals
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f" {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the selected choice"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
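# Usage sketch (choices are hypothetical; `run` blocks until a selection is made):
#
# menu = BulletMenu("Which compute environment are you running?", ["This machine", "AWS (Amazon SageMaker)"])
# selected_index = menu.run(default_choice=0)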
| 703 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[float], weight: list[float], capacity: float) -> tuple[float, list[float]]:
    index = list(range(len(weight)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
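# Illustrative check (item set chosen for the example): take items 1 and 2 whole,
# then two-thirds of item 3, for a total value of 240.0 at capacity 50.
if __name__ == "__main__":
    assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50)[0] == 240.0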
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 0 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
_UpperCAmelCase =value
elif weight_type == "weight_g":
_UpperCAmelCase =value
elif weight_type == "weight_v":
_UpperCAmelCase =value
elif weight_type == "bias":
_UpperCAmelCase =value
else:
_UpperCAmelCase =value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
    words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
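# For instance (hypothetical dict file contents): if `dict_path` holds the two
# lines "the 1" and "quick 1", the result is
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "the": 4, "quick": 5}.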
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0_2_2_4, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
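# Hypothetical invocation (script name and every path below are placeholders):
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path ./checkpoint_best.pt --dict_path ./dict.tgt.txt \
#       --pytorch_dump_folder_path ./s2t-wav2vec2-seq2seq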
| 408 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
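# Usage sketch (hypothetical prompt; the checkpoint downloads on first use):
#
# translator = TranslationTool()
# translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")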
| 48 | 0 |
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
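# For example (shell invocation shown for illustration), running the suite as
# `RUN_SLOW=yes pytest tests/` makes the flag below parse to True.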
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)

def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)

def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)

def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)

def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)

def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)

def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)

def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)

def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)

def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)

def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)

def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)

def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)

def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)

def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)

def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)

def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)

def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)

def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)

def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
return False
return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result
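# Illustrative call (the command itself is a placeholder): run a worker script
# synchronously and raise with the captured stderr if it fails.
#
# result = execute_subprocess_async(["python", "-c", "print('ok')"])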
class SubprocessCallException(Exception):
pass
def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e | 707 |
"""simple docstring"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
while mid <= high:
if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
high -= 1
else:
a_ : Dict = F"""The elements inside the sequence must contains only {colors} values"""
raise ValueError(SCREAMING_SNAKE_CASE__ )
return sequence
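# Quick self-check with illustrative inputs (not from the original file):
if __name__ == "__main__":
    assert dutch_national_flag_sort([2, 0, 1, 0, 2]) == [0, 0, 1, 2, 2]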
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""") | 370 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n        >>> import torch\n\n        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n        >>> pipe_prior.to(\"cuda\")\n\n        >>> prompt = \"red cat, 4k photo\"\n        >>> out = pipe_prior(prompt)\n        >>> image_emb = out.image_embeds\n        >>> negative_image_emb = out.negative_image_embeds\n\n        >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n        >>> pipe.to(\"cuda\")\n\n        >>> image = pipe(\n        ...     prompt,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=negative_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ... ).images\n\n        >>> image[0].save(\"cat.png\")\n        ```\n"
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
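# e.g. (illustration) get_new_h_w(768, 768) -> (96, 96): 768 // 8**2 = 12 with no
# remainder, and each latent dim scales back by the factor (12 * 8 = 96).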
class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)
        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)
            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])
        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
                hasattr(module, "_hf_hook")
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = get_new_h_w(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 432 |
'''simple docstring'''
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
'''simple docstring'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
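# Worked example (illustrative values): a 0.5 H inductor at 60 Hz has
# X_L = 2 * pi * 60 * 0.5 ≈ 188.5 ohms.
if __name__ == "__main__":
    assert round(ind_reactance(0.5, 60, 0)["reactance"], 1) == 188.5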
if __name__ == "__main__":
import doctest
doctest.testmod()
| 432 | 1 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def solution(max_perimeter: int = 1000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
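# Known result for Project Euler problem 39 (sanity note): solution(1000) == 840,
# the perimeter up to 1000 admitting the most integer-sided right triangles.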
if __name__ == "__main__":
print(F"Perimeter {solution()} has maximum solutions")
| 709 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self):
return 3_2
@property
    def time_input_dim(self):
return 3_2
@property
    def block_out_channels_a(self):
return self.time_input_dim
@property
    def time_embed_dim(self):
return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
return 1_0_0
@property
    def dummy_unet(self):
torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNetaDConditionModel(**model_kwargs)
return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs)
return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
_UpperCAmelCase : Tuple = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 1_0,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaControlnetImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        prompt = "A robot, 4k photo"
        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt="",
        ).to_tuple()
        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 156 | 0 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs)
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        # Streaming mode never materializes the Arrow cache on disk.
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
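

# A minimal usage sketch (not part of the original module). It assumes a live
# SparkSession and uses only the public `read()` API defined above; the session
# setup shown here is illustrative, not prescriptive:
#
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.master("local[2]").appName("demo").getOrCreate()
#   df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#   ds = SparkDatasetReader(df, streaming=False).read()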
| 408 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=["titi", "toto", 42], type=make_choice_type_function(["titi", "toto", 42]))
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=("titi", "toto", 42), type=make_choice_type_function(["titi", "toto", 42]))
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]))

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True)
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True)
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # a JSON file must go through `parse_json_file`, not `parse_yaml_file`
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 48 | 0 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 427 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a (generated) token sequence into an ordered JSON format."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
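
    # Illustrative sketch of what `token2json` produces (the tag names below are
    # hypothetical, not taken from the original source):
    #
    #   processor.token2json("<s_menu><s_name>latte</s_name></s_menu>")
    #   # -> {"menu": {"name": "latte"}}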
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 427 | 1 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image tensor with values in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a PIL image."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
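

# A small usage sketch (illustrative, not part of the original module):
#
#   import numpy as np
#   batch = np.random.rand(2, 64, 64, 3)  # values in [0, 1], NHWC layout
#   pils = numpy_to_pil(batch)            # -> list of two 64x64 PIL images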
| 85 | from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels)

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.")
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)), msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ))

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 85 | 1 |
"""simple docstring"""
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
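

# Worked example (illustrative): a 10 kg body moving at 10 m/s carries
# 0.5 * 10 * 10**2 = 500.0 joules, so kinetic_energy(10, 10) == 500.0,
# and the sign of the velocity does not matter: kinetic_energy(10, -10) == 500.0.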
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 720 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 491 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=512 + 1, n_positions=32 * 32, n_embd=512, n_layer=24, n_head=8, n_inner=None, activation_function="quick_gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, tie_word_embeddings=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
        # vocab_size is 512 pixel clusters plus one start-of-sequence token
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(self, preprocessor: "FeatureExtractionMixin", batch_size: int = 1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 32, image_height: int = 32) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
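

# Minimal usage sketch (illustrative, not part of the original module):
#
#   config = ImageGPTConfig()                 # defaults: 24 layers, n_embd=512
#   onnx_config = ImageGPTOnnxConfig(config)
#   list(onnx_config.inputs)                  # -> ["input_ids"]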
| 438 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(self, vocab_size=29056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
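

# Minimal usage sketch (illustrative, not part of the original module):
#
#   config = MegatronBertConfig()   # the default layout shown above
#   config.hidden_size              # -> 1024
#   config.num_hidden_layers        # -> 24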
| 438 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 678 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile

import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load


try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if i == last_level:  # compare the index, not the key string
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only)
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_dict = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            raise EnvironmentError("Can't load config for")

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return config_dict, kwargs
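

# Usage sketch for `Config` (illustrative; the checkpoint id below is assumed
# from the accompanying demo, not guaranteed by this module):
#
#   config = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
#   print(config)  # pretty-prints the nested keys with their value types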
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading")
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False.")
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)

            http_get(
                url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
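

# Example of the resulting cache-file naming scheme (illustrative):
#
#   url_to_filename("https://example.com/model.bin", etag='"abc"')
#   # -> "<sha256(url)>.<sha256(etag)>"   (plus a ".h5" suffix for ".h5" urls)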
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__magic_name__ = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__magic_name__ = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__magic_name__ , __magic_name__ = os.path.split(snake_case_ )
__magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__magic_name__ = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__magic_name__ = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
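
# Condensed, standalone version of the extraction branch above: probe whether
# the archive is a zip or a tar, extract into a sibling directory, and raise
# on anything else. Illustrative only; the "-extracted" suffix mirrors the
# naming convention used in the function above.
import os
import tarfile
from zipfile import ZipFile, is_zipfile

def extract_archive(path):
    out_dir = path + '-extracted'
    os.makedirs(out_dir, exist_ok=True)
    if is_zipfile(path):
        with ZipFile(path, 'r') as zip_file:
            zip_file.extractall(out_dir)
    elif tarfile.is_tarfile(path):
        with tarfile.open(path) as tar_file:
            tar_file.extractall(out_dir)
    else:
        raise EnvironmentError('Archive format of {} could not be identified'.format(path))
    return out_dir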
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
__magic_name__ = eval(f.read() )
else:
__magic_name__ = requests.get(snake_case_ )
try:
            __magic_name__ = req.json()
except Exception:
__magic_name__ = req.content.decode()
assert data is not None, "could not connect"
try:
__magic_name__ = eval(snake_case_ )
except Exception:
__magic_name__ = data.split('''\n''' )
req.close()
return data
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
__magic_name__ = requests.get(snake_case_ )
__magic_name__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
__magic_name__ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__magic_name__ = pkl.load(snake_case_ )
__magic_name__ = weights.pop('''model''' )
__magic_name__ = {}
for k, v in model.items():
__magic_name__ = torch.from_numpy(snake_case_ )
if "running_var" in k:
__magic_name__ = torch.tensor([0] )
__magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' )
__magic_name__ = zero
return new
def _SCREAMING_SNAKE_CASE ( ):
    print(f'{os.path.abspath(os.path.join("" , os.pardir ) )}/demo.ipynb' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
__magic_name__ = cva.imread(snake_case_ )
else:
__magic_name__ = get_image_from_url(snake_case_ )
assert img is not None, f'could not connect to: {im}'
__magic_name__ = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__magic_name__ = img[:, :, ::-1]
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict=1 ):
return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ )) | 678 | 1 |
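
# Usage sketch for the batching generator that closes the file above: it lazily
# yields successive fixed-size slices of a sequence (the final slice may be
# shorter). Standalone equivalent with descriptive names:
def chunks(items, batch=1):
    return (items[i : i + batch] for i in range(0, len(items), batch))

assert list(chunks([1, 2, 3, 4, 5], batch=2)) == [[1, 2], [3, 4], [5]]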
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCamelCase = False
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "A painting of a squirrel eating a burger "
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = generator.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "A painting of a squirrel eating a burger "
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
UpperCAmelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 82 |
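
# The round-trip test above hinges on a plain-torch contract: a torch.Generator
# reseeded with the same value reproduces the exact same random draws, so two
# pipeline runs (before and after save/load) can be compared sample-for-sample.
# Minimal sketch, no diffusers required:
import torch

g = torch.Generator().manual_seed(0)
a = torch.randn(4, generator=g)
g.manual_seed(0)  # rewind the generator to the same state
b = torch.randn(4, generator=g)
assert torch.equal(a, b)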
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''vit'''
def __init__( self : List[str] , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : int=3072 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : int=1e-12 , _UpperCAmelCase : List[str]=224 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=16 , **_UpperCAmelCase : List[str] , ) -> List[str]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = encoder_stride
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = version.parse('''1.11''' )
@property
def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self : Union[str, Any] ) -> float:
'''simple docstring'''
return 1e-4
| 82 | 1 |
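
# Usage sketch for the ONNX export config above: instantiate it from a model
# config and inspect the dynamic-axes mapping and numeric tolerance it
# declares. The import path assumes the stock `transformers` package (the
# classes in the snippet above are local, renamed equivalents of ViTConfig and
# ViTOnnxConfig).
from transformers import ViTConfig
from transformers.models.vit.configuration_vit import ViTOnnxConfig

config = ViTConfig()
onnx_config = ViTOnnxConfig(config)
print(onnx_config.inputs)               # OrderedDict mapping pixel_values to its dynamic axes
print(onnx_config.atol_for_validation)  # 1e-4, used when validating the exported graph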
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class __A :
"""simple docstring"""
A_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
A_ = field(
default=a , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A_ = field(
default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} )
A_ = field(
default=a , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A_ = field(default=a , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
A_ = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __A :
"""simple docstring"""
A_ = field(
metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
A_ = field(
default=a , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
A_ = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ = field(
default=a , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _lowerCAmelCase ( ) ->Optional[int]:
"""simple docstring"""
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
lowercase__ = import_module('''tasks''' )
try:
lowercase__ = getattr(lowercase , model_args.task_type )
lowercase__ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , lowercase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
lowercase__ = token_classification_task.get_labels(data_args.labels )
lowercase__ = dict(enumerate(lowercase ) )
lowercase__ = len(lowercase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase , idalabel=lowercase , labelaid={label: i for i, label in enumerate(lowercase )} , cache_dir=model_args.cache_dir , )
lowercase__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
lowercase__ = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , )
# Get datasets
lowercase__ = (
TokenClassificationDataset(
token_classification_task=lowercase , data_dir=data_args.data_dir , tokenizer=lowercase , labels=lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowercase__ = (
TokenClassificationDataset(
token_classification_task=lowercase , data_dir=data_args.data_dir , tokenizer=lowercase , labels=lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(lowercase : np.ndarray , lowercase : np.ndarray ) -> Tuple[List[int], List[int]]:
lowercase__ = np.argmax(lowercase , axis=2 )
lowercase__ , lowercase__ = preds.shape
lowercase__ = [[] for _ in range(lowercase )]
lowercase__ = [[] for _ in range(lowercase )]
for i in range(lowercase ):
for j in range(lowercase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(lowercase : EvalPrediction ) -> Dict:
lowercase__ , lowercase__ = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(lowercase , lowercase ),
"precision": precision_score(lowercase , lowercase ),
"recall": recall_score(lowercase , lowercase ),
"f1": fa_score(lowercase , lowercase ),
}
# Data collator
lowercase__ = DataCollatorWithPadding(lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowercase__ = Trainer(
model=lowercase , args=lowercase , train_dataset=lowercase , eval_dataset=lowercase , compute_metrics=lowercase , data_collator=lowercase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase__ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase__ = trainer.evaluate()
lowercase__ = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(lowercase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , lowercase , lowercase )
writer.write('''%s = %s\n''' % (key, value) )
results.update(lowercase )
# Predict
if training_args.do_predict:
lowercase__ = TokenClassificationDataset(
token_classification_task=lowercase , data_dir=data_args.data_dir , tokenizer=lowercase , labels=lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
lowercase__ , lowercase__ , lowercase__ = trainer.predict(lowercase )
lowercase__ , lowercase__ = align_predictions(lowercase , lowercase )
lowercase__ = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(lowercase , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , lowercase , lowercase )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
lowercase__ = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(lowercase , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(lowercase , lowercase , lowercase )
return results
def _lowerCAmelCase ( lowercase : List[str] ) ->Tuple:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 318 |
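
# Standalone sketch of the align_predictions step used in the script above:
# argmax over the label axis, then drop every position whose gold label equals
# nn.CrossEntropyLoss().ignore_index (-100), which marks special tokens and
# padding. The label map and arrays below are toy values for illustration.
import numpy as np

label_map = {0: 'O', 1: 'B-PER', 2: 'I-PER'}
ignore_index = -100
predictions = np.random.rand(1, 4, 3)            # (batch, seq_len, num_labels)
label_ids = np.array([[0, 1, 2, ignore_index]])  # last position is padding

preds = np.argmax(predictions, axis=2)
preds_list, out_label_list = [[]], [[]]
for j in range(label_ids.shape[1]):
    if label_ids[0, j] != ignore_index:
        out_label_list[0].append(label_map[int(label_ids[0, j])])
        preds_list[0].append(label_map[int(preds[0, j])])
print(out_label_list, preds_list)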
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def _lowerCAmelCase ( ) ->Optional[int]:
"""simple docstring"""
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=lowercase , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=lowercase , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=lowercase , help='''where to store parsed gold_data_path file''' , )
lowercase__ = parser.parse_args()
with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
args.gold_data_path , '''w''' ) as gold_file:
lowercase__ = json.load(lowercase )
for dpr_record in tqdm(lowercase ):
lowercase__ = dpr_record['''question''']
lowercase__ = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
eval_file.write(question + '''\n''' )
gold_file.write('''\t'''.join(lowercase ) + '''\n''' )
if __name__ == "__main__":
main()
| 318 | 1 |
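
# Toy illustration of the record shape the parser above consumes: each DPR
# entry holds a question plus positive contexts with titles. One question per
# line goes to the evaluation set; the tab-joined titles form the gold line.
# The record below is made up for illustration.
dpr_record = {
    'question': 'who wrote hamlet',
    'positive_ctxs': [{'title': 'Hamlet'}, {'title': 'William Shakespeare'}],
}
question = dpr_record['question']
titles = [context['title'] for context in dpr_record['positive_ctxs']]
print(question)            # -> one line of the evaluation_set file
print('\t'.join(titles))   # -> the matching line of the gold_data_path file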
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def UpperCAmelCase_ ( ):
'''simple docstring'''
print('''Making key files...''' )
make_key_files('''rsa''' , 10_24 )
print('''Key files generation successful.''' )
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
print('''Generating prime p...''' )
SCREAMING_SNAKE_CASE__ = rabinMiller.generate_large_prime(_A )
print('''Generating prime q...''' )
SCREAMING_SNAKE_CASE__ = rabinMiller.generate_large_prime(_A )
SCREAMING_SNAKE_CASE__ = p * q
print('''Generating e that is relatively prime to (p - 1) * (q - 1)...''' )
while True:
SCREAMING_SNAKE_CASE__ = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(_A , (p - 1) * (q - 1) ) == 1:
break
print('''Calculating d that is mod inverse of e...''' )
SCREAMING_SNAKE_CASE__ = cryptoMath.find_mod_inverse(_A , (p - 1) * (q - 1) )
SCREAMING_SNAKE_CASE__ = (n, e)
SCREAMING_SNAKE_CASE__ = (n, d)
return (public_key, private_key)
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
print('''\nWARNING:''' )
print(
F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = generate_key(_A )
print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(F'''{name}_pubkey.txt''' , '''w''' ) as out_file:
out_file.write(F'''{key_size},{public_key[0]},{public_key[1]}''' )
print(F'''Writing private key to file {name}_privkey.txt...''' )
with open(F'''{name}_privkey.txt''' , '''w''' ) as out_file:
out_file.write(F'''{key_size},{private_key[0]},{private_key[1]}''' )
if __name__ == "__main__":
main()
| 493 |
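
# The textbook math behind the key generator above, on deliberately tiny
# numbers (never use primes this small in practice): choose p and q, pick an
# exponent e coprime to (p - 1) * (q - 1), derive d as its modular inverse,
# and confirm that encryption and decryption are inverses modulo n.
p, q = 61, 53
n = p * q                    # 3233
phi = (p - 1) * (q - 1)      # 3120
e = 17                       # gcd(17, 3120) == 1
d = pow(e, -1, phi)          # 2753; modular-inverse pow needs Python 3.8+
message = 65
cipher = pow(message, e, n)  # 2790
assert pow(cipher, d, n) == message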
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_SCREAMING_SNAKE_CASE : Optional[Any] = get_tests_dir('''fixtures''')
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Any ) -> Any:
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE__ = mock.Mock()
SCREAMING_SNAKE_CASE__ = 500
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = HTTPError
SCREAMING_SNAKE_CASE__ = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=__lowerCamelCase ) as mock_head:
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check we did call the fake head request
mock_head.assert_called()
def lowercase_ ( self : int ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowercase_ ( cls : Union[str, Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = TOKEN
HfFolder.save_token(__lowerCamelCase )
@classmethod
def lowercase_ ( cls : Optional[int] ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def lowercase_ ( self : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(__lowerCamelCase )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__lowerCamelCase , repo_id='''test-feature-extractor''' , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
def lowercase_ ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(__lowerCamelCase )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__lowerCamelCase , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
def lowercase_ ( self : int ) -> int:
CustomFeatureExtractor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ = CustomFeatureExtractor.from_pretrained(__lowerCamelCase )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(
f'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=__lowerCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 493 | 1 |
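
# The hub tests above build on a simpler invariant worth checking in
# isolation: save_pretrained / from_pretrained is a lossless round trip for a
# feature extractor's attributes. Sketch using the same tiny test checkpoint
# referenced above (downloads it once, then works locally):
import tempfile
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')
with tempfile.TemporaryDirectory() as tmp_dir:
    extractor.save_pretrained(tmp_dir)
    reloaded = Wav2Vec2FeatureExtractor.from_pretrained(tmp_dir)
assert extractor.to_dict() == reloaded.to_dict()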
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def UpperCamelCase ( _lowerCAmelCase : Any ):
__a = torch.load(_lowerCAmelCase , map_location="""cpu""" )
if "model" in sd.keys():
__a = torch.load(_lowerCAmelCase , map_location="""cpu""" )["""model"""]
# pop unnecessary weights
__a = [
"""decoder.version""",
"""decoder.output_projection.weight""",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
__a = {
"""decoder.project_in_dim.weight""": """decoder.project_in.weight""",
"""decoder.project_out_dim.weight""": """decoder.project_out.weight""",
"""decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
__a = sd.pop(_lowerCAmelCase )
__a = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__a = sd[key]
# We split QKV in separate Q,K,V
__a = key.replace(""".qkv_proj.""" , """.q_proj.""" )
__a = key.replace(""".qkv_proj.""" , """.k_proj.""" )
__a = key.replace(""".qkv_proj.""" , """.v_proj.""" )
__a = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
__a , __a , __a = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
__a = q
__a = k
__a = v
del sd[key]
return sd
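
# What the fused-QKV split above does, on a toy tensor: one projection of
# shape (3 * hidden, hidden) is cut along dim 0 into three equal blocks (note
# the comment above: for these checkpoints the on-disk order is K, V, Q
# despite the qkv name). Illustrative sizes only.
import torch

hidden = 4
qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q, k, v = torch.split(qkv, qkv.shape[0] // 3, dim=0)
assert q.shape == k.shape == v.shape == (hidden, hidden)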
@torch.no_grad()
def UpperCamelCase ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int]=None ):
__a = load_checkpoint(_lowerCAmelCase )
if config is not None:
__a = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
__a = OPTConfig()
__a = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
__A = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 173 | """simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class a ( unittest.TestCase ):
def __init__( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any]=13 , lowerCamelCase_ : Tuple=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Dict=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Tuple=99 , lowerCamelCase_ : int=32 , lowerCamelCase_ : int=5 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Dict=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : int=5_12 , lowerCamelCase_ : Optional[int]=16 , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : Dict=0.02 , lowerCamelCase_ : Any=4 , ) -> Any:
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_attention_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_choices
def lowerCAmelCase_ ( self : int ) -> List[Any]:
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_attention_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase_ ( self : Optional[Any] ) -> Any:
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = True
__a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class a ( A_ , unittest.TestCase ):
A_ : Union[str, Any] = True
A_ : Dict = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase_ ( self : Any ) -> Optional[int]:
__a = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ) -> Any:
for model_class_name in self.all_model_classes:
__a = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=lowerCamelCase_ )
__a = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
@require_flax
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : Dict ) -> Optional[int]:
__a = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=lowerCamelCase_ )
__a = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
__a = model(lowerCamelCase_ )[0]
__a = [1, 11, 5_02_65]
self.assertEqual(list(output.shape ) , lowerCamelCase_ )
# compare the actual values for a slice.
__a = np.array(
[[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ) -> Tuple:
__a = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=lowerCamelCase_ )
__a = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
__a = model(lowerCamelCase_ )[0]
# compare the actual values for a slice.
__a = np.array(
[[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 173 | 1 |
'''simple docstring'''
import sys
def _UpperCAmelCase ( _lowerCamelCase : Tuple ) -> int:
_lowerCAmelCase : List[str] = len(_lowerCamelCase )
_lowerCAmelCase : int = [[0 for x in range(_lowerCamelCase )] for x in range(_lowerCamelCase )]
_lowerCAmelCase : Union[str, Any] = [[0 for x in range(_lowerCamelCase )] for x in range(_lowerCamelCase )]
for chain_length in range(2 , _lowerCamelCase ):
for a in range(1 , n - chain_length + 1 ):
_lowerCAmelCase : Dict = a + chain_length - 1
_lowerCAmelCase : Dict = sys.maxsize
for c in range(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : str = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
_lowerCAmelCase : str = cost
_lowerCAmelCase : List[str] = c
return matrix, sol
def _UpperCAmelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int ) -> Optional[Any]:
if i == j:
print("""A""" + str(_lowerCamelCase ) , end=""" """ )
else:
print("""(""" , end=""" """ )
print_optiomal_solution(_lowerCamelCase , _lowerCamelCase , optimal_solution[i][j] )
print_optiomal_solution(_lowerCamelCase , optimal_solution[i][j] + 1 , _lowerCamelCase )
print(""")""" , end=""" """ )
def _UpperCAmelCase ( ) -> List[str]:
_lowerCAmelCase : Optional[Any] = [30, 35, 15, 5, 10, 20, 25]
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = matrix_chain_order(_lowerCamelCase )
print("""No. of Operation required: """ + str(matrix[1][n - 1] ) )
print_optiomal_solution(_lowerCamelCase , 1 , n - 1 )
if __name__ == "__main__":
main()
| 384 |
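
# Sanity check for the dynamic program above, using the classic CLRS instance:
# dimension array [30, 35, 15, 5, 10, 20, 25] (six matrices) admits a minimum
# of 15125 scalar multiplications. Standalone reimplementation with
# descriptive names:
import sys

def matrix_chain_min_ops(dims):
    n = len(dims)
    cost = [[0] * n for _ in range(n)]
    for length in range(2, n):
        for a in range(1, n - length + 1):
            b = a + length - 1
            cost[a][b] = sys.maxsize
            for c in range(a, b):
                candidate = cost[a][c] + cost[c + 1][b] + dims[a - 1] * dims[c] * dims[b]
                cost[a][b] = min(cost[a][b], candidate)
    return cost[1][n - 1]

assert matrix_chain_min_ops([30, 35, 15, 5, 10, 20, 25]) == 15125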
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 384 | 1 |
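
# The _LazyModule machinery above defers heavy framework imports until an
# attribute is first accessed. The core idea can be sketched with PEP 562
# module-level __getattr__ (a simplified illustration meant to live in a
# package __init__.py, not how transformers implements it verbatim):
import importlib

_LAZY_ATTRS = {'XGLMConfig': '.configuration_xglm'}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')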
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Dict = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase ) -> int:
"""simple docstring"""
A : Tuple = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
A : Optional[int] = 1024
A : str = 4096
A : str = 24
A : Dict = 16
A : Tuple = [5, 11, 17, 23]
A : Optional[int] = [256, 512, 1024, 1024]
A : Dict = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
A : Optional[Any] = 768
A : Optional[int] = [1, 1, 1, 0.5]
A : int = [256, 512, 768, 768]
A : List[Any] = 150
A : Optional[Any] = 16
A : Optional[Any] = (1, 384, 384)
A : Tuple = False
A : str = """project"""
if "ade" in checkpoint_url:
A : Optional[Any] = True
A : int = 768
A : int = [1, 1, 1, 0.5]
A : Any = 150
A : Tuple = 16
A : Dict = """huggingface/label-files"""
A : Optional[Any] = """ade20k-id2label.json"""
A : Tuple = json.load(open(cached_download(hf_hub_url(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) )
A : Union[str, Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
A : Optional[int] = idalabel
A : Union[str, Any] = {v: k for k, v in idalabel.items()}
A : Optional[int] = [1, 150, 480, 480]
return config, expected_shape
def __UpperCamelCase ( _lowerCAmelCase ) -> int:
"""simple docstring"""
A : Optional[Any] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def __UpperCamelCase ( _lowerCAmelCase ) -> Tuple:
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
A : Tuple = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
A : str = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
A : Tuple = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
A : Any = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
A : Dict = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
A : Dict = name.replace("""proj""" , """projection""" )
if "blocks" in name:
A : Tuple = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
A : Tuple = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
A : List[str] = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
A : List[str] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
A : Union[str, Any] = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
A : Any = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
A : int = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
A : int = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
A : str = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
A : Dict = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
A : Optional[Any] = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
A : Tuple = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
A : int = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
A : str = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
A : Optional[Any] = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
A : Any = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
A : Tuple = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
A : str = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
A : Tuple = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
A : List[str] = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
A : Optional[int] = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
A : Dict = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
A : str = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
A : str = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
A : Optional[Any] = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
A : Any = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
A : Tuple = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
A : Tuple = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
A : int = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
A : List[str] = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
A : Optional[Any] = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
A : Optional[Any] = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
A : List[str] = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
A : str = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
A : List[str] = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
A : Optional[int] = name.replace("""..""" , """.""" )
if "stem.conv" in name:
A : List[Any] = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
A : Union[str, Any] = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
A : Dict = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
A : Union[str, Any] = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
A : List[str] = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
A : Optional[int] = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
A : List[Any] = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A : int = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
A : List[str] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A : int = in_proj_weight[: config.hidden_size, :]
A : str = in_proj_bias[: config.hidden_size]
A : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A : Any = in_proj_weight[
-config.hidden_size :, :
]
A : Dict = in_proj_bias[-config.hidden_size :]
def __UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
A : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A : Union[str, Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
"""simple docstring"""
A , A : Any = get_dpt_config(_lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
A : Tuple = torch.load(_lowerCAmelCase , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(_lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
A : Optional[int] = state_dict.pop(_lowerCAmelCase )
A : Optional[Any] = val
# read in qkv matrices
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
A : Optional[int] = DPTForSemanticSegmentation(_lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
# Check outputs on an image
A : int = 480 if """ade""" in checkpoint_url else 384
A : str = DPTImageProcessor(size=_lowerCAmelCase )
A : Optional[int] = prepare_img()
A : Tuple = image_processor(_lowerCAmelCase , return_tensors="""pt""" )
# forward pass
A : Any = model(**_lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**_lowerCAmelCase ).predicted_depth
if show_prediction:
A : Dict = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=_lowerCAmelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 520 |
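
# At its core, the conversion above is a key-rewriting pass over a state dict:
# pop each key, push it through a chain of substring replacements, and
# reinsert the value under the new name. Toy demonstration using three of the
# mappings from the rename function above:
toy_state_dict = {'pretrained.model.blocks.0.attn.proj.weight': 0}
renames = [
    ('pretrained.model', 'dpt.encoder'),
    ('blocks', 'layer'),
    ('attn.proj', 'attention.output.dense'),
]

for key in list(toy_state_dict):
    new_key = key
    for old, new in renames:
        new_key = new_key.replace(old, new)
    toy_state_dict[new_key] = toy_state_dict.pop(key)

assert 'dpt.encoder.layer.0.attention.output.dense.weight' in toy_state_dict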
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
@staticmethod
def _lowerCAmelCase ( *lowerCamelCase__, **lowerCamelCase__ ):
pass
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
A : List[Any] = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def __UpperCamelCase ( _lowerCAmelCase ) -> Dict:
"""simple docstring"""
A : Union[str, Any] = np.array(_lowerCAmelCase )
A : str = npimg.shape
return {"hash": hashimage(_lowerCAmelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : int = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
__lowerCamelCase : Union[str, Any] = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Union[str, Any] = MaskGenerationPipeline(model=lowerCamelCase__, image_processor=lowerCamelCase__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def _lowerCAmelCase ( self ):
pass
@slow
@require_torch
def _lowerCAmelCase ( self ):
A : Tuple = pipeline("""mask-generation""", model="""facebook/sam-vit-huge""" )
A : Dict = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""", points_per_batch=256 )
# Shortening by hashing
A : Union[str, Any] = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(lowerCamelCase__, decimals=4 ), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871}
], )
# fmt: on
@require_torch
@slow
def _lowerCAmelCase ( self ):
A : Union[str, Any] = """facebook/sam-vit-huge"""
A : int = pipeline("""mask-generation""", model=lowerCamelCase__ )
A : int = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""", pred_iou_thresh=1, points_per_batch=256 )
# Shortening by hashing
A : Tuple = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(lowerCamelCase__, decimals=4 ), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
], )
| 520 | 1 |
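
# The image-hashing helper above shortens each predicted mask to a stable
# ten-character fingerprint so the test can compare dozens of outputs
# compactly. Standalone sketch on a toy mask:
import hashlib

import numpy as np
from PIL import Image

mask = np.eye(8, dtype=np.uint8) * 255   # toy binary mask
image = Image.fromarray(mask)
digest = hashlib.md5(image.tobytes()).hexdigest()[:10]
print(digest, mask.shape)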
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__lowerCAmelCase =logging.getLogger(__name__)
def load_and_quantize_model( model , bnb_quantization_config , weights_location = None , device_map = None , no_split_module_classes = None , max_memory = None , offload_folder = None , offload_state_dict = False , ):
    """simple docstring"""
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed." )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed." )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager." )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    name = name.replace(".weight" , "" ).replace(".bias" , "" )
                    param = getattr(model , name , None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(dtype )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization." )
        logger.info(
            F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
            "We move the model to cuda." )
        return model
    elif weights_location is None:
        raise RuntimeError(
            F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
        load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
        return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ):
    """simple docstring"""
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization." )
        logger.info("The device_map was not initialized. " "Setting device_map to `{'':torch.cuda.current_device()}`." )
    if isinstance(device_map , str ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'." )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
            } )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == "balanced_low_0") , max_memory=max_memory , **kwargs , )
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
    """simple docstring"""
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model , has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug." )
    return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ):
    """simple docstring"""
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False" )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _ , _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def get_keys_to_not_convert( model ):
    """simple docstring"""
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , "base_model_prefix" ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , "" )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers( model ):
    """simple docstring"""
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device( parameter ):
    """simple docstring"""
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ):
    """simple docstring"""
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split("." )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(F'''{module} has no attribute {split}.''' )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , "SCB" ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace("weight" , "SCB" ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , "meta" , dtype=new_dtype , value=torch.empty(*param.size() ) )
| 333 |
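# Hedged usage sketch for the quantization entry point above. It uses the
# public accelerate API (`BnbQuantizationConfig`, `load_and_quantize_model`);
# the model id and the weights folder below are placeholders, not values from
# this module.
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

with init_empty_weights():
    # Build the model skeleton on the meta device so no fp32 weights are allocated.
    empty_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("facebook/opt-350m"))

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="/path/to/opt-350m-weights",  # placeholder folder containing the checkpoint
    device_map="auto",
)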
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest( unittest.TestCase ):
@slow
    def test_output_embeds_base_model( self ):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.int32 , )  # "J'aime le camembert !"
        output = model(input_ids )["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 333 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''tanreinama/GPTSAN-2.8B-spout_is_uniform''': (
'''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'''
),
}
class GPTSanJapaneseConfig( PretrainedConfig ):
'''simple docstring'''
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__( self , vocab_size=36_000 , max_position_embeddings=1_280 , d_model=1_024 , d_ff=8_192 , d_ext=4_096 , d_spout=128 , num_switch_layers=10 , num_ext_layers=0 , num_heads=16 , num_experts=16 , expert_capacity=128 , dropout_rate=0.0 , layer_norm_epsilon=1E-5 , router_bias=False , router_jitter_noise=0.0 , router_dtype="float32" , router_ignore_padding_tokens=False , output_hidden_states=False , output_attentions=False , initializer_factor=0.002 , output_router_logits=False , use_cache=True , separator_token_id=35_998 , pad_token_id=35_995 , eos_token_id=35_999 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , **kwargs , )
| 716 |
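# Hedged usage sketch of the config class above: `num_layers` is derived from
# the two layer counts, and `attribute_map` lets the generic `hidden_size`
# name resolve to `d_model`.
config = GPTSanJapaneseConfig(num_switch_layers=8, num_ext_layers=2)
assert config.num_layers == 10
assert config.hidden_size == config.d_model  # resolved through attribute_map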
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_rust_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def test_encodings_from_sample_data( self ):
        """simple docstring"""
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
        TARGET_TOKENS = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES )['input_ids']
        self.assertListEqual(TARGET_TOKENS , computed_tokens )
        decoded_tokens = tokenizer.batch_decode(computed_tokens )
        self.assertListEqual(INPUT_SENTENCES , decoded_tokens )
    def test_padding( self , max_length=6 ):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s , max_length=max_length )
                    tokenizer_r.encode_plus(s , max_length=max_length )
                    tokenizer_r.batch_encode_plus(s2 , max_length=max_length )
                    tokenizer_r.encode(p , max_length=max_length )
                    tokenizer_r.batch_encode_plus(p2 , max_length=max_length )
                except ValueError:
                    self.fail('Bloom Tokenizer should be able to deal with padding' )
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='max_length' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='max_length' , )
    def test_encodings_from_xnli_dataset( self ):
        """simple docstring"""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset('xnli' , 'all_languages' , split='test' , streaming=True )
        sample_data = next(iter(ds ) )['premise']  # pick up one data
        input_text = list(sample_data.values() )
        output_tokens = list(map(tokenizer.encode , input_text ) )
        predicted_text = [tokenizer.decode(x , clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(predicted_text , input_text )
    def test_pretrained_model_lists( self ):
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 491 | 0 |
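# Hedged round-trip sketch of the batch encode/decode pattern the Bloom
# tokenizer test above exercises; plain ASCII text should decode back unchanged.
from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
ids = tok.batch_encode_plus(["The quick brown fox</s>"])["input_ids"]
print(tok.batch_decode(ids))  # ["The quick brown fox</s>"]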
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BICUBIC , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **kwargs , ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size , default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size_dict = get_size_dict(size , default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image , size=shortest_edge , default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(
            image , size=(size_dict["height"], size_dict["width"]) , resample=resample , data_format=data_format , **kwargs)
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs)
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs)
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs)
    def preprocess( self , images: ImageInput , do_resize: Optional[bool] = None , size: Optional[Dict[str, int]] = None , resample: PILImageResampling = None , do_center_crop: Optional[bool] = None , crop_size: Optional[Dict[str, int]] = None , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_normalize: Optional[bool] = None , image_mean: Optional[Union[float, Iterable[float]]] = None , image_std: Optional[Union[float, Iterable[float]]] = None , return_tensors: Optional[TensorType] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image , size , resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image , crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image , rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image , image_mean , image_std) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors)
| 226 |
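# Hedged usage sketch of the image processor defined above (its 256/224
# shortest-edge resize followed by a 224x224 center crop matches LeViT-style
# preprocessing); the image URL is the COCO sample used elsewhere in this dump.
from PIL import Image
import requests

processor = LevitImageProcessor()
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with the default crop_size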
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Recursive 0/1 knapsack: maximum value achievable with items[index:] and
    remaining capacity max_weight.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    >>> knapsack([3, 4, 5], [10, 9, 8], 3, 25, 0)
    27
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 226 | 1 |
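# Usage sketch for the recursive 0/1 knapsack above: with capacity 5, the
# items of weight 1 and 4 fit together for a best value of 5 + 8 = 13.
print(knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0))  # 13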
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 471 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError( Exception ):
    pass
class Node :
    def __init__( self , data: Any ) -> None:
        self.data = data
        self.next_node: Node | None = None
    def __iter__( self ):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node )
            yield node.data
            node = node.next_node
    @property
    def has_loop( self ) -> bool:
        try:
            list(self )
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop) # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop) # True
    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop) # False
    root_node = Node(1)
    print(root_node.has_loop) # False
| 471 | 1 |
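# Alternative sketch: Floyd's tortoise-and-hare detects the same loop in O(1)
# space instead of the O(n) visited list used by `has_loop` above.
def has_loop_floyd(head: "Node | None") -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False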
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester( unittest.TestCase ):
    mod_file = inspect.getfile(accelerate.test_utils )
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
    base_cmd = ['accelerate', 'launch']
    config_folder = Path.home() / '.cache/huggingface/accelerate'
    config_file = 'default_config.yaml'
    config_path = config_folder / config_file
    changed_path = config_folder / '_default_config.yaml'
    test_config_path = Path('tests/test_configs' )
    @classmethod
    def setUpClass( cls ):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path )
    @classmethod
    def tearDownClass( cls ):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path )
    def test_no_config( self ):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
    def test_config_compatibility( self ):
        for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
            with self.subTest(config_file=config ):
                execute_subprocess_async(
                    self.base_cmd + ['--config_file', str(config ), self.test_file_path] , env=os.environ.copy() )
    def test_accelerate_test( self ):
        execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() )
class TpuConfigTester( unittest.TestCase ):
    tpu_name = 'test-tpu'
    tpu_zone = 'us-central1-a'
    command = 'ls'
    cmd = ['accelerate', 'tpu-config']
    base_output = 'cd /usr/share'
    command_file = 'tests/test_samples/test_command_file.sh'
    gcloud = 'Running gcloud compute tpus tpu-vm ssh'
    def test_base( self ):
        output = run_command(
            self.cmd
            + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=True , )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , output , )
    def test_base_backward_compatibility( self ):
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command',
                self.command,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=True , )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , output , )
    def test_with_config_file( self ):
        output = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=True )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , output , )
    def test_with_config_file_and_command( self ):
        output = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=True , )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , output , )
    def test_with_config_file_and_multiple_command( self ):
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--command',
                self.command,
                '--command',
                'echo "Hello World"',
                '--debug',
            ] , return_stdout=True , )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all' , output , )
    def test_with_config_file_and_command_file( self ):
        output = run_command(
            self.cmd
            + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=True , )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , output , )
    def test_with_config_file_and_command_file_backward_compatibility( self ):
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command_file',
                self.command_file,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=True , )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , output , )
    def test_accelerate_install( self ):
        output = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=True , )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all' , output , )
    def test_accelerate_install_version( self ):
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--install_accelerate',
                '--accelerate_version',
                '12.0.0',
                '--debug',
            ] , return_stdout=True , )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all' , output , )
| 242 |
from jiwer import compute_measures
import datasets
_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_DESCRIPTION = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
_KWARGS_DESCRIPTION = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n    >>> references = [\"this is the reference\", \"there is another one\"]\n    >>> wer = datasets.load_metric(\"wer\")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        '''simple docstring'''
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 242 | 1 |
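# A minimal sketch of the WER formula quoted in the metric description above,
# WER = (S + D + I) / N, computed here as word-level edit distance so no
# jiwer dependency is needed.
def simple_wer(reference: str, prediction: str) -> float:
    ref, hyp = reference.split(), prediction.split()
    # dp[i][j] = edit distance between ref[:i] and hyp[:j]
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost)
    return dp[len(ref)][len(hyp)] / len(ref)

print(simple_wer("this is the reference", "this is the prediction"))  # 0.25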
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys( config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'module.blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'module.blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    '''simple docstring'''
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head( state_dict ):
    '''simple docstring'''
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1e-4 )
    print(F'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 711 |
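# Hedged sketch of the q/k/v split performed in `read_in_q_k_v` above: timm
# stores one fused (3*H, H) qkv projection, while HF ViT expects separate
# query, key and value matrices of shape (H, H) each.
import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
value_w = in_proj_weight[-hidden_size:, :]
# Re-concatenating the three slices recovers the fused matrix exactly.
assert torch.equal(torch.cat([query_w, key_w, value_w]), in_proj_weight)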
"""simple docstring"""
def get_highest_set_bit_position( number: int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ):
        raise TypeError("""Input value must be an 'int' type""" )
    position = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 393 | 0 |
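# Quick check (hedged): for non-negative inputs, the helper above mirrors
# Python's built-in int.bit_length.
for n in (0, 1, 2, 17, 1024):
    assert get_highest_set_bit_position(n) == n.bit_length()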
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a , b = 0, 1
    while True:
        a , b = b, a + b
        yield b
def solution( n: int = 1000 ) -> int:
    answer = 1
    fibonacci_gen = fibonacci_generator()
    while len(str(next(fibonacci_gen ) ) ) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 47 |
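# Sanity check (hedged): F(12) = 144 is the first Fibonacci number with three
# digits, so solution(3) should return 12.
print(solution(3))  # 12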
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n    - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 35 | 0 |
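# Worked sketch of the recall formula from the metric description above,
# Recall = TP / (TP + FN), applied to the docstring's first example.
references = [0, 0, 1, 1, 1]
predictions = [0, 1, 0, 1, 1]
tp = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 1)  # 2
fn = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 0)  # 1
print(tp / (tp + fn))  # 0.6666... as in the docstring example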
def speed_of_sound_in_a_fluid( density: float , bulk_modulus: float ) -> float:
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
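# Hedged worked example: water at roughly density 998 kg/m^3 and bulk modulus
# 2.15e9 Pa gives about 1468 m/s, close to the textbook speed of sound in water.
print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))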
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
# Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['model_config'] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['module']
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path )
    # add an entry for [MASK2]
    entity_vocab['[MASK2]'] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('<ent>' , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken('<ent2>' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_1, entity_token_2]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__snake_case )
    with open(os.path.join(pytorch_dump_folder_path , 'tokenizer_config.json' ) , 'r' ) as f:
        tokenizer_config = json.load(f )
    tokenizer_config['tokenizer_class'] = 'MLukeTokenizer'
    with open(os.path.join(pytorch_dump_folder_path , 'tokenizer_config.json' ) , 'w' ) as f:
        json.dump(tokenizer_config , f )
    with open(os.path.join(pytorch_dump_folder_path , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
        json.dump(entity_vocab , f )
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
# Initialize the embeddings of the special tokens
__snake_case = tokenizer.convert_tokens_to_ids(['@'] )[0]
__snake_case = tokenizer.convert_tokens_to_ids(['#'] )[0]
__snake_case = state_dict['embeddings.word_embeddings.weight']
__snake_case = word_emb[ent_init_index].unsqueeze(0 )
__snake_case = word_emb[enta_init_index].unsqueeze(0 )
__snake_case = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__snake_case = state_dict[bias_name]
__snake_case = decoder_bias[ent_init_index].unsqueeze(0 )
__snake_case = decoder_bias[enta_init_index].unsqueeze(0 )
__snake_case = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__snake_case = f'''encoder.layer.{layer_index}.attention.self.'''
__snake_case = state_dict[prefix + matrix_name]
__snake_case = state_dict[prefix + matrix_name]
__snake_case = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__snake_case = state_dict['entity_embeddings.entity_embeddings.weight']
__snake_case = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
__snake_case = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__snake_case = state_dict['entity_predictions.bias']
__snake_case = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
__snake_case = torch.cat([entity_prediction_bias, entity_mask_bias] )
__snake_case = LukeForMaskedLM(config=__snake_case ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
__snake_case = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
__snake_case = state_dict[key]
else:
__snake_case = state_dict[key]
__snake_case , __snake_case = model.load_state_dict(__snake_case ,strict=__snake_case )
if set(__snake_case ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(__snake_case ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__snake_case = MLukeTokenizer.from_pretrained(__snake_case ,task='entity_classification' )
__snake_case = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
__snake_case = (0, 9)
__snake_case = tokenizer(__snake_case ,entity_spans=[span] ,return_tensors='pt' )
__snake_case = model(**__snake_case )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__snake_case = torch.Size((1, 33, 7_68) )
__snake_case = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__snake_case ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__snake_case = torch.Size((1, 1, 7_68) )
__snake_case = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__snake_case ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
__snake_case = MLukeTokenizer.from_pretrained(__snake_case )
__snake_case = 'Tokyo is the capital of <mask>.'
__snake_case = (24, 30)
__snake_case = tokenizer(__snake_case ,entity_spans=[span] ,return_tensors='pt' )
__snake_case = model(**__snake_case )
__snake_case = encoding['input_ids'][0].tolist()
__snake_case = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
__snake_case = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__snake_case )
__snake_case = outputs.entity_logits[0][0].argmax().item()
__snake_case = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(__snake_case ) )
model.save_pretrained(__snake_case )
def __lowercase( __snake_case : int ) -> Any:
__snake_case = ['[MASK]', '[PAD]', '[UNK]']
__snake_case = [json.loads(__snake_case ) for line in open(__snake_case )]
__snake_case = {}
for entry in data:
__snake_case = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
__snake_case = entity_id
break
__snake_case = f'''{language}:{entity_name}'''
__snake_case = entity_id
return new_mapping
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCamelCase_ : Dict = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
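
# Post-conversion smoke test (illustrative; "./mluke_converted" is a hypothetical dump folder):
from transformers import LukeForMaskedLM, MLukeTokenizer

tokenizer = MLukeTokenizer.from_pretrained("./mluke_converted")
model = LukeForMaskedLM.from_pretrained("./mluke_converted")
inputs = tokenizer("Tokyo is the capital of <mask>.", entity_spans=[(24, 30)], return_tensors="pt")
outputs = model(**inputs)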
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be covered after laying at least one
    coloured tile of length 2, 3 or 4 (one tile length per count), summed over the three lengths."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
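
# Quick check (illustrative): for a row of length 5 there are 7 placements using
# length-2 tiles, 3 using length-3 tiles and 2 using length-4 tiles, so solution(5) == 12.
assert solution(5) == 12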
'''simple docstring'''
def hexagonal_numbers(length: int) -> list[int]:
    """Returns the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
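
# Quick check (illustrative): h(n) = n * (2n - 1) gives 0, 1, 6, 15, 28, ...
assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]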
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Zero-shot text classification tool: scores `text` against each candidate label
    via NLI entailment with facebook/bart-large-mnli."""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        # Pair the text with an NLI hypothesis per candidate label.
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
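
# Usage sketch (illustrative; downloads the bart-large-mnli weights on first call):
classifier = TextClassificationTool()
print(classifier("This new API is a joy to use!", labels=["positive", "negative"]))  # likely "positive"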
import enum
import warnings

from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with the given input length."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
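
# Usage sketch (illustrative; the model name is one public summarization checkpoint,
# downloaded on first use):
from transformers import pipeline

summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
print(summarizer("Long article text goes here ...", max_length=56)[0]["summary_text"])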
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
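
# Instantiation sketch (illustrative): with CLIP ViT defaults (hidden_size=768,
# num_hidden_layers=12) the mapper gets (12 + 1) // 5 = 2 transformer blocks.
from transformers import CLIPVisionConfig

encoder = PaintByExampleImageEncoder(CLIPVisionConfig(), proj_size=768)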
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('float'),
                    'references': datasets.Value('float'),
                }
            ),
            reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A module to provide a shortcut from an intermediate BertLayer output to cross-entropy computation."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
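
# Early-exit knob (illustrative), assuming a trained DeeBertForSequenceClassification `model`:
# lower entropy thresholds make more examples exit at earlier layers, trading accuracy for speed.
model.bert.encoder.set_early_exit_entropy(0.5)  # one threshold shared by all layers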
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalizes the first letter of a sentence or word."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
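
# Quick check (illustrative): only the first character is mapped through the table.
assert capitalize("hello world") == "Hello world"
assert capitalize("123 main st") == "123 main st"  # non-letters fall back to themselves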
'''simple docstring'''
def bead_sort(sequence: list) -> list:
    """Bead (gravity) sort: repeatedly let larger 'beads' fall past smaller neighbours."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError('Sequence must be list of non-negative integers')
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the ITU-R BT.601 luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a boolean mask."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation: a pixel turns on if the kernel overlaps any on-pixel."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
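
# Tiny worked example (illustrative): under a cross-shaped kernel, a single on-pixel
# dilates into a cross.
demo = np.zeros((3, 3))
demo[1, 1] = 1
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
print(dilation(demo, cross))  # 1s at the centre and its 4-neighbours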
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps a FLAVA image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self, images: Optional[ImageInput] = None, text=None, add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None, **kwargs,
    ):
        """Prepares `text` with the tokenizer and `images` with the FLAVA image processor."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping, return_length=return_length,
                verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors, **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
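
# Usage sketch (illustrative; "facebook/flava-full" hosts a ready-made processor):
from PIL import Image

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
inputs = processor(text=["a photo of a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt")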
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # Single sequences are wrapped as [CLS] text [SEP]; pairs as [CLS] A [SEP] B [SEP].
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    r"""Construct a DPRContextEncoder tokenizer (backed by the BERT tokenizer)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""Construct a DPRQuestionEncoder tokenizer (backed by the BERT tokenizer)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(UpperCAmelCase_ )
class __lowercase :
"""simple docstring"""
def __call__( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Union[bool, str] = False , lowerCAmelCase__ : Union[bool, str] = False , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Optional[bool] = None , **lowerCAmelCase__ : Tuple , ):
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE_: List[str] = titles if texts is None else texts
return super().__call__(
lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Optional[int] = titles if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [titles]
SCREAMING_SNAKE_CASE_: int = texts if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [texts]
SCREAMING_SNAKE_CASE_: str = len(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = questions if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [questions] * n_passages
if len(lowerCAmelCase__) != len(lowerCAmelCase__):
raise ValueError(
F"There should be as many titles than texts but got {len(lowerCAmelCase__)} titles and {len(lowerCAmelCase__)} texts.")
SCREAMING_SNAKE_CASE_: Optional[Any] = super().__call__(lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__)["input_ids"]
SCREAMING_SNAKE_CASE_: Union[str, Any] = super().__call__(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__)["input_ids"]
SCREAMING_SNAKE_CASE_: int = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase__ , lowerCAmelCase__)
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE_: Dict = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
SCREAMING_SNAKE_CASE_: int = attention_mask
return self.pad(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__)
def decode_best_spans(
    self,
    reader_input: BatchEncoding,
    reader_output: DPRReaderOutput,
    num_spans: int = 16,
    max_answer_length: int = 64,
    num_spans_per_passage: int = 4,
):
    input_ids = reader_input["input_ids"]
    start_logits, end_logits, relevance_logits = reader_output[:3]
    n_passages = len(relevance_logits)
    sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
    nbest_spans_predictions: List[DPRReaderOutput] = []
    for doc_id in sorted_docs:
        sequence_ids = list(input_ids[doc_id])
        # assuming question & title information is at the beginning of the sequence
        passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
        if sequence_ids[-1] == self.pad_token_id:
            sequence_len = sequence_ids.index(self.pad_token_id)
        else:
            sequence_len = len(sequence_ids)
        best_spans = self._get_best_spans(
            start_logits=start_logits[doc_id][passage_offset:sequence_len],
            end_logits=end_logits[doc_id][passage_offset:sequence_len],
            max_answer_length=max_answer_length,
            top_spans=num_spans_per_passage,
        )
        for start_index, end_index in best_spans:
            start_index += passage_offset
            end_index += passage_offset
            nbest_spans_predictions.append(
                DPRSpanPrediction(
                    span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                    relevance_score=relevance_logits[doc_id],
                    doc_id=doc_id,
                    start_index=start_index,
                    end_index=end_index,
                    text=self.decode(sequence_ids[start_index : end_index + 1]),
                )
            )
            if len(nbest_spans_predictions) >= num_spans:
                break
    return nbest_spans_predictions[:num_spans]
def _get_best_spans(
    self,
    start_logits: List[int],
    end_logits: List[int],
    max_answer_length: int,
    top_spans: int,
):
    scores = []
    for start_index, start_score in enumerate(start_logits):
        for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
            scores.append(((start_index, start_index + answer_length), start_score + end_score))
    scores = sorted(scores, key=lambda x: x[1], reverse=True)
    chosen_span_intervals = []
    for (start_index, end_index), score in scores:
        if start_index > end_index:
            raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
        length = end_index - start_index + 1
        if length > max_answer_length:
            raise ValueError(f"Span is too long: {length} > {max_answer_length}")
        if any(
            start_index <= prev_start_index <= prev_end_index <= end_index
            or prev_start_index <= start_index <= end_index <= prev_end_index
            for (prev_start_index, prev_end_index) in chosen_span_intervals
        ):
            continue
        chosen_span_intervals.append((start_index, end_index))
        if len(chosen_span_intervals) == top_spans:
            break
    return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
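# A minimal usage sketch for the reader tokenizer above, assuming the published
# "facebook/dpr-reader-single-nq-base" vocab (an assumption for illustration):
# each passage is encoded as [CLS] question [SEP] title [SEP] text.
if __name__ == "__main__":
    tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded = tokenizer(
        questions=["What is love?"],
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by the artist Haddaway."],
        padding=True,
        return_tensors="pt",
    )
    print(encoded["input_ids"].shape)  # (n_passages, sequence_length)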
| 671 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
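# A minimal sketch of how this config is consumed; the tiny sizes are illustrative
# only, and `hidden_size` resolves through `attribute_map` to `n_embd`.
if __name__ == "__main__":
    config = GPTBigCodeConfig(n_embd=64, n_layer=2, n_head=4, multi_query=True)
    print(config.hidden_size)  # 64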
| 704 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    '''simple docstring'''
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
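# Quick sanity check of the batching rules above (a sketch; `frame` stands for any
# valid image such as a PIL.Image or numpy array):
#   make_batched(frame)            -> [[frame]]         single image -> one 1-frame video
#   make_batched([frame, frame])   -> [[frame, frame]]  list of frames -> one video
#   make_batched([[frame, frame]]) -> unchanged         already a batch of videos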
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ):
        # Note the parentheses: without them, `resample is None` alone would trigger the error.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
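# A smoke-test sketch for the processor above. Naming it after VideoMAE's processor
# is an identification assumption; the 224x224 output follows from the defaults
# (shortest-edge resize + center crop) and the frame sizes are arbitrary.
if __name__ == "__main__":
    processor = VideoMAEImageProcessor()
    video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
    batch = processor(video, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224)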
| 390 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
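# With the registration above in place, a bare import of the package stays cheap:
# heavy submodules load only on first attribute access (sketch, assuming the package
# is importable as transformers.models.mobilebert):
#
#   from transformers.models.mobilebert import MobileBertConfig  # triggers the lazy load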
| 533 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("""Swin does not use inputs_embeds""" )
    def test_inputs_embeds(self):
        pass
@unittest.skip("""Swin does not support feedforward chunking""" )
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
    def test_attention_outputs(self):
        pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
    def test_save_load(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
    def test_model_from_pretrained(self):
        pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def test_save_load_fast_init_from_base(self):
        pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object).any()}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object).any()}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
'''simple docstring'''
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
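# A standalone smoke test distilled from the backbone checks above; the tiny config
# values mirror the model tester's defaults and are illustrative only.
if __name__ == "__main__":
    config = MaskFormerSwinConfig(
        image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4],
        out_features=["stage1", "stage2", "stage3"],
    )
    backbone = MaskFormerSwinBackbone(config)
    outputs = backbone(torch.randn(1, 3, 32, 32))
    print([tuple(fm.shape) for fm in outputs.feature_maps])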
| 533 | 1 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
"""simple docstring"""
    def setup(self) -> None:
        '''simple docstring'''
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        '''simple docstring'''
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        '''simple docstring'''
        train_args = "\n            --model_type roberta\n            --model_name_or_path roberta-base\n            --task_name MRPC\n            --do_train\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --max_seq_length 128\n            --per_gpu_eval_batch_size=1\n            --per_gpu_train_batch_size=8\n            --learning_rate 2e-4\n            --num_train_epochs 3\n            --overwrite_output_dir\n            --seed 42\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --save_steps 0\n            --overwrite_cache\n            --eval_after_first_stage\n            ".split()
        self.run_and_check(train_args)
        eval_args = "\n            --model_type roberta\n            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --task_name MRPC\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --max_seq_length 128\n            --eval_each_highway\n            --eval_highway\n            --overwrite_cache\n            --per_gpu_eval_batch_size=1\n            ".split()
        self.run_and_check(eval_args)
        entropy_eval_args = "\n            --model_type roberta\n            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --task_name MRPC\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --max_seq_length 128\n            --early_exit_entropy 0.1\n            --eval_highway\n            --overwrite_cache\n            --per_gpu_eval_batch_size=1\n            ".split()
        self.run_and_check(entropy_eval_args)
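# The argv-patching pattern above in miniature: a script's main() can be driven
# in-process by temporarily replacing sys.argv (toy sketch, no real script needed).
if __name__ == "__main__":
    with patch.object(sys, "argv", ["run_glue_deebert.py", "--max_seq_length", "128"]):
        print(sys.argv)  # ['run_glue_deebert.py', '--max_seq_length', '128']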
| 700 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    """simple docstring"""

    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan
    inputs = ["text"]
    outputs = ["audio"]
    def setup(self):
        '''simple docstring'''
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()
    def encode(self, text, speaker_embeddings=None):
        '''simple docstring'''
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward(self, inputs):
        '''simple docstring'''
        with torch.no_grad():
            return self.model.generate_speech(**inputs)
    def decode(self, outputs):
        '''simple docstring'''
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
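# A usage sketch, assuming the transformers agent-tool runtime: instantiating the
# tool downloads its checkpoints lazily, and calling it returns a waveform tensor.
#
#   tool = TextToSpeechTool()
#   audio = tool("Hello, world")   # 1-D float tensor at 16 kHz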
| 683 | 0 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
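# The replacement the warning points to, as a sketch (the model id is illustrative):
#
#   from diffusers import StableDiffusionImg2ImgPipeline
#   pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   out = pipe(prompt="a fantasy landscape", image=init_image, strength=0.75)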
| 473 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        '''simple docstring'''
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def _create_example_dataset(self):
        '''simple docstring'''
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)
    def test_create(self):
        '''simple docstring'''
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])
    def test_list_dict_equivalent(self):
        '''simple docstring'''
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)
    def test_uneven_records(self):  # checks what happens with missing columns
        '''simple docstring'''
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns
    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        '''simple docstring'''
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))
    def test_create_empty(self):
        '''simple docstring'''
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
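# Dataset.from_list in two lines, matching the behavior the tests above pin down
# (column names come from the first record; missing keys become None):
if __name__ == "__main__":
    ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2}])
    print(ds.column_names, ds[1])  # ['col_1', 'col_2'] {'col_1': 2, 'col_2': None}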
| 492 | 0 |
def perfect_cube(n: int) -> bool:
    # Round the floating-point cube root before comparing: 27 ** (1 / 3) evaluates to
    # 3.0000000000000004 in IEEE-754 arithmetic, so an exact comparison would fail.
    val = round(n ** (1 / 3))
    return val * val * val == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
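# A float-free alternative: an integer cube root via binary search avoids rounding
# issues entirely and stays exact for arbitrarily large integers (sketch, assuming
# a non-negative n).
def perfect_cube_exact(n: int) -> bool:
    lo, hi = 0, max(1, n)
    while lo < hi:
        mid = (lo + hi + 1) // 2  # upper mid so the loop converges on the floor root
        if mid * mid * mid <= n:
            lo = mid
        else:
            hi = mid - 1
    return lo * lo * lo == n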
| 719 |
'''simple docstring'''
from math import ceil
def solution(n: int = 1001) -> int:
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
            print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
| 238 | 0 |
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"{k}</w>"]
        da[k] = d[k]  # restore
    return da
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )
    args = vars(chkpt["args"]["model"])
    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]
    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)
    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")
    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break
    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)
    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"
    model_conf = {
'''architectures''': ['''FSMTForConditionalGeneration'''],
'''model_type''': '''fsmt''',
'''activation_dropout''': args['''activation_dropout'''],
'''activation_function''': '''relu''',
'''attention_dropout''': args['''attention_dropout'''],
'''d_model''': args['''decoder_embed_dim'''],
'''dropout''': args['''dropout'''],
'''init_std''': 0.02,
'''max_position_embeddings''': args['''max_source_positions'''],
'''num_hidden_layers''': args['''encoder_layers'''],
'''src_vocab_size''': src_vocab_size,
'''tgt_vocab_size''': tgt_vocab_size,
'''langs''': [src_lang, tgt_lang],
'''encoder_attention_heads''': args['''encoder_attention_heads'''],
'''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''],
'''encoder_layerdrop''': args['''encoder_layerdrop'''],
'''encoder_layers''': args['''encoder_layers'''],
'''decoder_attention_heads''': args['''decoder_attention_heads'''],
'''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''],
'''decoder_layerdrop''': args['''decoder_layerdrop'''],
'''decoder_layers''': args['''decoder_layers'''],
'''bos_token_id''': 0,
'''pad_token_id''': 1,
'''eos_token_id''': 2,
'''is_encoder_decoder''': True,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_all_embeddings'''],
}
# good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0
    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }
    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())
    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
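    # Example invocation (the paths are illustrative; the checkpoint dir must also
    # contain the fairseq dicts and bpecodes files read above):
    #
    #   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
    #     --fsmt_checkpoint_path ./wmt19.ru-en/model4.pt \
    #     --pytorch_dump_folder_path ./fsmt-wmt19-ru-en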
| 506 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None
    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
def __lowerCAmelCase ( self ) -> Optional[int]:
# with apply_OCR = True
_UpperCAmelCase : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
_UpperCAmelCase : Optional[int] = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
_UpperCAmelCase : Union[str, Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
_UpperCAmelCase : Tuple = image_processing(A , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_UpperCAmelCase : Optional[int] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
_UpperCAmelCase : str = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 
5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A )
self.assertListEqual(encoding.boxes , A )
# with apply_OCR = False
_UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=A )
_UpperCAmelCase : str = image_processing(A , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
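        # NOTE: the reference words/boxes above were produced with Tesseract 4.1.1 (see the
        # comment preceding the lists); a different Tesseract release may return different
        # OCR output and make this integration test fail.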
| 506 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two equal-length vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For every vector in value_array, return the closest dataset vector and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            # keep the closest vector seen so far
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
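    # Example with hypothetical data: find the nearest dataset vector for one query.
    # dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
    # value_array = np.array([[0, 0, 1]])
    # similarity_search(dataset, value_array)  # -> [[[0, 0, 0], 1.0]]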
| 701 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"
    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True,
        classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False,
        adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
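# Minimal usage sketch (values are the defaults restored above):
# config = XmodConfig(default_language="en_XX")
# assert config.model_type == "xmod" and config.hidden_size == 768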
| 68 | 0 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    """Maps an original EfficientFormer checkpoint key to its Transformers equivalent."""
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")
        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
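# Illustrative mapping, derived from the rules above (inputs are hypothetical):
# rename_key("patch_embed.0.weight", num_meta4D_last_stage=2)
# -> "efficientformer.patch_embed.convolution1.weight"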
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """Renames every key of the original state dict in place and returns it."""
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # both pipelines must produce identical pixel values
    assert torch.allclose(original_pixel_values, pixel_values)
    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7"
        )
    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
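# Hypothetical invocation (file names are placeholders, not released artifacts):
# python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#     --pytorch_model_path efficientformer_l1.pth \
#     --config_file efficientformer_l1_config.json \
#     --pytorch_dump_path efficientformer-l1-300 \
#     --no-push_to_hub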
| 520 | UpperCamelCase = [
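    # NOTE: this and the constants below look like precomputed denoising-timestep schedules
    # (each descending from 999 to 0); the original, distinct variable names were lost, so
    # every `UpperCamelCase` assignment here overwrites the previous one.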
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
UpperCamelCase = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
UpperCamelCase = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
UpperCamelCase = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
UpperCamelCase = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
UpperCamelCase = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
UpperCamelCase = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
UpperCamelCase = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 520 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16],
        kernel_size=7, mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu",
        initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0,
        out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
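# Sketch: with the defaults above, the derived channel dimension after the last stage is
# config = NatConfig(); config.hidden_size  # int(64 * 2 ** 3) == 512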
| 656 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    """Moves the value stored under key `old` to key `new`."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # strip the bare "conditional_detr" prefix before re-prefixing with "conditional_detr.model"
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
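# Hypothetical invocation (the dump path is a placeholder):
# python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#     --model_name conditional_detr_resnet50 \
#     --pytorch_dump_folder_path ./conditional-detr-resnet-50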
| 656 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
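# Usage sketch (the checkpoint name is illustrative):
# config_dict = get_image_processor_config("hf-internal-testing/tiny-random-vit")
# config_dict.get("image_processor_type")  # e.g. "ViTImageProcessor" if present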
class AutoImageProcessor:
    """
    Generic image-processor factory; instantiate it through
    `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)`.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor
        # config and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class) | 458 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 526 | 0 |
def reverse_long_words(sentence: str) -> str:
    """
    Reverse every word that is longer than four characters.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join("".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
| 707 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
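# Wiring sketch (output dir, metric, and patience values are illustrative):
# trainer = pl.Trainer(
#     callbacks=[
#         get_checkpoint_callback("outputs", "rouge2"),
#         get_early_stopping_callback("rouge2", patience=3),
#         Seq2SeqLoggingCallback(),
#     ]
# )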
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
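
# Typical wiring in a training script (illustrative; `args` is a hypothetical
# argparse namespace holding output_dir, val_metric and early_stopping_patience):
#     trainer = pl.Trainer(
#         callbacks=[
#             get_checkpoint_callback(args.output_dir, args.val_metric),
#             get_early_stopping_callback(args.val_metric, args.early_stopping_patience),
#             Seq2SeqLoggingCallback(),
#         ],
#     )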
| 326 | 0 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, relative_attention=False,
        position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 471 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images to feed the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)
        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)
        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)
        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])
    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)
        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
| 471 | 1 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
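
# Worked example (illustrative): gold "the cat sat" vs. prediction "cat sat down".
# normalize_answer drops the article, so the gold tokens are [cat, sat] and the
# prediction tokens are [cat, sat, down]: precision = 2/3, recall = 2/2 = 1, and
# F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.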
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
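
# Example invocation (hypothetical file names):
#     python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#         --na-prob-file na_probs.json --out-file eval.json --out-image-dir out_images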
| 422 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
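
# For the adjacency list above, compute_ap prints the articulation points 2, 3 and 5:
# removing any one of them disconnects the graph. The DFS-based check runs in O(V + E).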
| 422 | 1 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` into `self.dest` by performing a forward pass using `x` as input.
        Under the hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """Fake wrapper for the vissl RegNet trunks, exposing the feature blocks vissl expects."""

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    A dictionary with some additional logic to return a function that creates the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val


class NameToOurModelFuncMap(dict):
    """
    A dictionary with some additional logic to return the correct Hugging Face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        # raise_if_mismatch=False is assumed here: the SEER trunks carry no head, so the
        # traced operation counts of source and destination can legitimately differ.
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

        if from_state_dict is not None:
            keys = []
            # for seer - in1k finetuned we have to manually copy the head
            if "seer" in name and "in1k" in name:
                keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
            to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
            our_model.load_state_dict(to_state_dict)

        our_outputs = our_model(x, output_hidden_states=True)
        our_output = (
            our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
        )
        from_output = from_model(x)
        from_output = from_output[-1] if type(from_output) is list else from_output
        # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
        if "seer" in name and "in1k" in name:
            our_output = our_outputs.hidden_states[-1]

        assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=True,
        )
        size = 224 if "seer" not in name else 384
        # we can use the convnext image processor here
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=True,
        )
        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
# pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()))
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()))
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()))
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))))
# IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()))
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()))
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()))
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))))
    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name],
            names_to_config[model_name], save_directory, push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name],
                config, save_directory, push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 8 |
'''simple docstring'''
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that current and next vertices are adjacent
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # Initialize path with -1, indicating that we have not visited them yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
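

if __name__ == "__main__":
    # Minimal usage sketch with an illustrative 5-vertex graph that contains a
    # Hamiltonian cycle starting (and ending) at vertex 0.
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))  # prints [0, 1, 2, 4, 3, 0]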
| 422 | 0 |
import logging

from transformers.configuration_utils import PretrainedConfig

logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A class replicating BertConfig with additional parameters for pruning/masking."""

    model_type = "masked_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
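

# Minimal usage sketch (the defaults above mirror bert-base-uncased, plus the
# movement-pruning knobs):
#     config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)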
| 710 |
def binary_or(a: int, b: int) -> str:
    """
    Return the result of a binary OR of two non-negative integers as a binary string.

    >>> binary_or(25, 32)
    '0b111001'
    >>> binary_or(37, 50)
    '0b110111'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
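
# Note: for integers the same result is simply bin(a | b); the string-building
# version above exists to demonstrate the digit-by-digit comparison.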
if __name__ == "__main__":
import doctest
doctest.testmod() | 381 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 527 |
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    """DFS traversal that returns the size of the subtree rooted at `start`."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9  # number of nodes, number of edges
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 527 | 1 |
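# Worked trace for the even-tree snippet above: rooted at node 1, the subtrees
# with an even node count are rooted at 3 ({3, 4}), at 6 ({6, 8, 9, 10}) and at
# the root itself (all 10 nodes), so the program collects cuts == [3, 6, 1]
# and prints len(cuts) - 1 == 2 removable edges (1-3 and 1-6), each cut leaving
# every component with an even number of nodes.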
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print('\nTesting gradient descent for a linear hypothesis function.\n')
    test_gradient_descent()
| 716 |
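# A vectorized NumPy re-statement of the batch update above (a sketch, not the
# original script): X gains a bias column and theta moves against the average
# gradient of the squared error, with the same data and learning rate.
import numpy as np

X = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1], [11, 12, 13]], dtype=float)
y = np.array([15, 25, 41, 8, 41], dtype=float)
Xb = np.hstack([np.ones((len(X), 1)), X])  # prepend the bias feature
theta = np.array([2.0, 4.0, 1.0, 5.0])
lr = 0.009
for _ in range(100_000):  # iteration cap added for safety
    new_theta = theta - lr * (Xb.T @ (Xb @ theta - y)) / len(y)
    if np.allclose(theta, new_theta, atol=2e-6, rtol=0):
        break
    theta = new_theta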
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = StableDiffusionPanoramaPipeline
__snake_case = TEXT_TO_IMAGE_PARAMS
__snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
__snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS
__snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase_ ( self ) -> Any:
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
_SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler()
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_SCREAMING_SNAKE_CASE : str = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=0 ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
_SCREAMING_SNAKE_CASE : int = StableDiffusionPanoramaPipeline(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = self.get_dummy_inputs(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = sd_pipe(**__lowerCamelCase ).images
_SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ) -> Union[str, Any]:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase_ ( self ) -> List[str]:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
_SCREAMING_SNAKE_CASE : Any = StableDiffusionPanoramaPipeline(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = self.get_dummy_inputs(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = "french fries"
_SCREAMING_SNAKE_CASE : str = sd_pipe(**__lowerCamelCase , negative_prompt=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = output.images
_SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
_SCREAMING_SNAKE_CASE : Dict = StableDiffusionPanoramaPipeline(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(**__lowerCamelCase , view_batch_size=2 )
_SCREAMING_SNAKE_CASE : List[str] = output.images
_SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_SCREAMING_SNAKE_CASE : List[Any] = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE : int = self.get_dummy_components()
_SCREAMING_SNAKE_CASE : Tuple = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" )
_SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionPanoramaPipeline(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.get_dummy_inputs(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = sd_pipe(**__lowerCamelCase ).images
_SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_SCREAMING_SNAKE_CASE : Any = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE : int = self.get_dummy_components()
_SCREAMING_SNAKE_CASE : List[str] = PNDMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , skip_prk_steps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionPanoramaPipeline(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = self.get_dummy_inputs(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = sd_pipe(**__lowerCamelCase ).images
_SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self , __lowerCamelCase=0 ) -> int:
_SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = "stabilityai/stable-diffusion-2-base"
_SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_pretrained(__lowerCamelCase , subfolder="scheduler" )
_SCREAMING_SNAKE_CASE : Any = StableDiffusionPanoramaPipeline.from_pretrained(__lowerCamelCase , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
_SCREAMING_SNAKE_CASE : int = self.get_inputs()
_SCREAMING_SNAKE_CASE : Optional[int] = pipe(**__lowerCamelCase ).images
_SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
_SCREAMING_SNAKE_CASE : List[str] = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
_SCREAMING_SNAKE_CASE : Tuple = self.get_inputs()
_SCREAMING_SNAKE_CASE : Tuple = pipe(**__lowerCamelCase ).images
_SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
_SCREAMING_SNAKE_CASE : Optional[Any] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[int] = 0
def callback_fn(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> None:
_SCREAMING_SNAKE_CASE : Dict = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_SCREAMING_SNAKE_CASE : str = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 2_5_6)
_SCREAMING_SNAKE_CASE : List[Any] = latents[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE : Dict = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
_SCREAMING_SNAKE_CASE : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 2_5_6)
_SCREAMING_SNAKE_CASE : str = latents[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE : Dict = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
_SCREAMING_SNAKE_CASE : List[Any] = False
_SCREAMING_SNAKE_CASE : Optional[int] = "stabilityai/stable-diffusion-2-base"
_SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler.from_pretrained(__lowerCamelCase , subfolder="scheduler" )
_SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(__lowerCamelCase , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
_SCREAMING_SNAKE_CASE : List[str] = self.get_inputs()
pipe(**__lowerCamelCase , callback=__lowerCamelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCamelCase_ ( self ) -> Optional[int]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_SCREAMING_SNAKE_CASE : List[Any] = "stabilityai/stable-diffusion-2-base"
_SCREAMING_SNAKE_CASE : Dict = DDIMScheduler.from_pretrained(__lowerCamelCase , subfolder="scheduler" )
_SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionPanoramaPipeline.from_pretrained(__lowerCamelCase , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
_SCREAMING_SNAKE_CASE : str = pipe(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 1_0**9 | 381 | 0 |
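# Minimal usage sketch assembled from the integration tests above (the model id,
# prompt and scheduler choice all appear there); needs a CUDA device and network access.
import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_id = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
pipe = pipe.to("cuda")
image = pipe(
    "a photo of the dolomites",
    generator=torch.manual_seed(0),
    num_inference_steps=3,
    guidance_scale=7.5,
).images[0]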
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
_KWARGS_DESCRIPTION = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": {"""id""": datasets.Value("""string"""), """prediction_text""": datasets.Value("""string""")},
                    """references""": {
                        """id""": datasets.Value("""string"""),
                        """answers""": datasets.features.Sequence(
                            {
                                """text""": datasets.Value("""string"""),
                                """answer_start""": datasets.Value("""int32"""),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""],
            reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""],
        )

    def _compute(self, predictions, references):
        '''simple docstring'''
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 288 |
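# A stripped-down sketch of the token-level F1 that the metric above reports
# (the official scoring script also normalizes answers - lowercasing, removing
# articles and punctuation - which is omitted here).
from collections import Counter

def f1_score(prediction: str, ground_truth: str) -> float:
    pred_tokens = prediction.split()
    gold_tokens = ground_truth.split()
    common = Counter(pred_tokens) & Counter(gold_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)

assert f1_score("1976", "1976") == 1.0  # reported as 100.0 by the metric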
"""simple docstring"""
def molarity_to_normality(nfactor, moles, volume):
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume, moles, temperature):
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure, moles, temperature):
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure, moles, volume):
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
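# Worked example for the ideal-gas helpers above (PV = nRT with
# R = 0.0821 L*atm/(mol*K)): 2 mol at 300 K in a 10 L vessel exerts
# 2 * 0.0821 * 300 / 10 = 4.926 atm, which the helper rounds to 5.
assert moles_to_pressure(volume=10, moles=2, temperature=300) == 5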
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__(self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=30 , lowerCAmelCase_=2 , lowerCAmelCase_=3 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=32 , lowerCAmelCase_=2 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=10 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=None , ):
A_ : int = parent
A_ : List[Any] = batch_size
A_ : Any = image_size
A_ : Optional[Any] = patch_size
A_ : Optional[Any] = num_channels
A_ : int = is_training
A_ : str = use_labels
A_ : Tuple = hidden_size
A_ : Optional[Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : Union[str, Any] = intermediate_size
A_ : List[Any] = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Optional[int] = type_sequence_label_size
A_ : Dict = initializer_range
A_ : List[Any] = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ : List[str] = (image_size // patch_size) ** 2
A_ : Dict = num_patches + 1
def lowerCamelCase(self ):
A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Dict = None
if self.use_labels:
A_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : int = self.get_config()
return config, pixel_values, labels
def lowerCamelCase(self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
A_ : str = TFViTModel(config=lowerCAmelCase_ )
A_ : str = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
A_ : Dict = self.image_size // 2
A_ : Tuple = pixel_values[:, :, :image_size, :image_size]
A_ : int = model(lowerCAmelCase_ , interpolate_pos_encoding=lowerCAmelCase_ , training=lowerCAmelCase_ )
A_ : str = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
A_ : int = self.type_sequence_label_size
A_ : Union[str, Any] = TFViTForImageClassification(lowerCAmelCase_ )
A_ : int = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
A_ : Any = self.image_size // 2
A_ : str = pixel_values[:, :, :image_size, :image_size]
A_ : int = model(lowerCAmelCase_ , interpolate_pos_encoding=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : Union[str, Any] = 1
A_ : Union[str, Any] = TFViTForImageClassification(lowerCAmelCase_ )
A_ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : Dict = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase(self ):
A_ : int = self.prepare_config_and_inputs()
A_ , A_ , A_ : List[Any] = config_and_inputs
A_ : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_A : List[str] = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_A : Any = (
{"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification}
if is_tf_available()
else {}
)
_A : List[str] = False
_A : int = False
_A : List[Any] = False
def lowerCamelCase(self ):
A_ : Dict = TFViTModelTester(self )
A_ : Any = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def lowerCamelCase(self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def lowerCamelCase(self ):
pass
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def lowerCamelCase(self ):
pass
def lowerCamelCase(self ):
A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Union[str, Any] = model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
A_ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , tf.keras.layers.Layer ) )
def lowerCamelCase(self ):
A_ , A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(lowerCAmelCase_ )
A_ : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Union[str, Any] = [*signature.parameters.keys()]
A_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowerCamelCase(self ):
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCamelCase(self ):
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def lowerCamelCase(self ):
A_ : List[str] = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(lowerCAmelCase_ )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase(self ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def lowerCamelCase(self ):
A_ : List[Any] = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" )
A_ : List[str] = self.default_image_processor
A_ : Optional[Any] = prepare_img()
A_ : List[str] = image_processor(images=lowerCAmelCase_ , return_tensors="""tf""" )
# forward pass
A_ : Union[str, Any] = model(**lowerCAmelCase_ )
# verify the logits
A_ : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
A_ : Dict = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 )
| 480 |
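# The sequence length used throughout the ViT tests above is the number of
# image patches plus one [CLS] token; with the tester's defaults:
image_size, patch_size = 30, 2
seq_length = (image_size // patch_size) ** 2 + 1
assert seq_length == 226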
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
A_ : Dict = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
for i in range(config.num_hidden_layers ):
if base_model:
A_ : Any = """"""
else:
A_ : List[Any] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ : Tuple = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
A_ : Union[str, Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
A_ : Dict = in_proj_weight[
: config.hidden_size, :
]
A_ : int = in_proj_bias[: config.hidden_size]
A_ : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
A_ : Optional[Any] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=False ):
A_ : str = BitConfig(
global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=snake_case__ , )
A_ : int = ViTHybridConfig(backbone_config=snake_case__ , image_size=384 , num_labels=1_000 )
A_ : Any = False
# load original model from timm
A_ : str = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ : Optional[Any] = timm_model.state_dict()
if base_model:
remove_classification_head_(snake_case__ )
A_ : Dict = create_rename_keys(snake_case__ , snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_q_k_v(snake_case__ , snake_case__ , snake_case__ )
A_ : Any = """huggingface/label-files"""
A_ : Any = """imagenet-1k-id2label.json"""
A_ : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
A_ : Optional[int] = {int(snake_case__ ): v for k, v in idalabel.items()}
A_ : Optional[int] = idalabel
A_ : Dict = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
A_ : int = ViTHybridModel(snake_case__ ).eval()
else:
A_ : Union[str, Any] = ViTHybridForImageClassification(snake_case__ ).eval()
model.load_state_dict(snake_case__ )
# create image processor
A_ : Any = create_transform(**resolve_data_config({} , model=snake_case__ ) )
A_ : List[Any] = transform.transforms
A_ : int = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
A_ : Optional[int] = ViTHybridImageProcessor(
do_resize=snake_case__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=snake_case__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
A_ : Tuple = prepare_img()
A_ : List[str] = transform(snake_case__ ).unsqueeze(0 )
A_ : int = processor(snake_case__ , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(snake_case__ , snake_case__ )
# verify logits
with torch.no_grad():
A_ : List[str] = model(snake_case__ )
A_ : Any = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
A_ : int = timm_model.forward_features(snake_case__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(snake_case__ , outputs.pooler_output , atol=1E-3 )
else:
A_ : Optional[int] = timm_model(snake_case__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(snake_case__ , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(F"""Pushing model and processor to the hub {vit_name}""" )
model.push_to_hub(F"""ybelkada/{vit_name}""" )
processor.push_to_hub(F"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
_lowerCAmelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 480 | 1 |
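# The conversion above slices timm's fused qkv projection into separate
# query/key/value matrices; the same split in isolation:
import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v]
query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : hidden_size * 2, :]
value = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)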
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Any = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
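# A bare-bones sketch of the lazy-import idea behind _LazyModule above: defer
# the real import until an attribute is first accessed (transformers' actual
# implementation also handles submodule routing and module specs).
import importlib

class LazyModule:
    def __init__(self, name: str):
        self._name = name
        self._module = None

    def __getattr__(self, attr):
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

np = LazyModule("numpy")  # numpy is imported only on first attribute access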
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = '''bert'''
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
| 4 | 1 |
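# Usage sketch for the two classes above: the ONNX export config reports which
# axes of each input are dynamic (batch, plus sequence or choice length).
config = BertConfig()
onnx_config = BertOnnxConfig(config)
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'}),
#              ('token_type_ids', {0: 'batch', 1: 'sequence'})])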
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = 'umt5'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ) -> None:
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. '''
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @property
    def num_attention_heads(self) -> int:
        return self.num_heads

    @property
    def num_hidden_layers(self) -> int:
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
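# The feed_forward_proj parsing in __init__ above splits e.g. "gated-gelu" into
# a gating flag and an activation name (then remapped to "gelu_new"); the same
# logic in isolation:
feed_forward_proj = "gated-gelu"
act_info = feed_forward_proj.split("-")
dense_act_fn = act_info[-1]            # "gelu"
is_gated_act = act_info[0] == "gated"  # True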
| 536 |
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'decord')
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params['frame_sampling_rate'] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params['num_frames'] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith('http://') or video.startswith('https://'):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''')

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 536 | 1 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=9_9 , __UpperCAmelCase=0 , __UpperCAmelCase=3_2 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase="last" , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=0 , ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = parent
lowerCAmelCase__ :Union[str, Any] = batch_size
lowerCAmelCase__ :Any = seq_length
lowerCAmelCase__ :Tuple = is_training
lowerCAmelCase__ :Dict = use_input_lengths
lowerCAmelCase__ :int = use_token_type_ids
lowerCAmelCase__ :Tuple = use_labels
lowerCAmelCase__ :List[Any] = gelu_activation
lowerCAmelCase__ :List[str] = sinusoidal_embeddings
lowerCAmelCase__ :Optional[Any] = causal
lowerCAmelCase__ :Optional[Any] = asm
lowerCAmelCase__ :Optional[Any] = n_langs
lowerCAmelCase__ :List[str] = vocab_size
lowerCAmelCase__ :Optional[int] = n_special
lowerCAmelCase__ :List[str] = hidden_size
lowerCAmelCase__ :int = num_hidden_layers
lowerCAmelCase__ :str = num_attention_heads
lowerCAmelCase__ :List[Any] = hidden_dropout_prob
lowerCAmelCase__ :Tuple = attention_probs_dropout_prob
lowerCAmelCase__ :Dict = max_position_embeddings
lowerCAmelCase__ :Optional[int] = type_sequence_label_size
lowerCAmelCase__ :Any = initializer_range
lowerCAmelCase__ :Union[str, Any] = num_labels
lowerCAmelCase__ :Union[str, Any] = num_choices
lowerCAmelCase__ :Tuple = summary_type
lowerCAmelCase__ :int = use_proj
lowerCAmelCase__ :Any = scope
lowerCAmelCase__ :List[Any] = bos_token_id
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ :Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ :List[str] = None
if self.use_input_lengths:
lowerCAmelCase__ :Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase__ :Optional[Any] = None
if self.use_token_type_ids:
lowerCAmelCase__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCAmelCase__ :Union[str, Any] = None
lowerCAmelCase__ :Union[str, Any] = None
lowerCAmelCase__ :List[Any] = None
if self.use_labels:
lowerCAmelCase__ :str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ :Any = ids_tensor([self.batch_size] , 2 ).float()
lowerCAmelCase__ :Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ :Tuple = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def snake_case ( self ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = XLMModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :int = model(__UpperCAmelCase , lengths=__UpperCAmelCase , langs=__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = model(__UpperCAmelCase , langs=__UpperCAmelCase )
lowerCAmelCase__ :List[str] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = XLMWithLMHeadModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Any = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = XLMForQuestionAnsweringSimple(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :str = model(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = model(__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = XLMForQuestionAnswering(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Tuple = model(__UpperCAmelCase )
lowerCAmelCase__ :str = model(
__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , cls_index=__UpperCAmelCase , is_impossible=__UpperCAmelCase , p_mask=__UpperCAmelCase , )
lowerCAmelCase__ :int = model(
__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , cls_index=__UpperCAmelCase , is_impossible=__UpperCAmelCase , )
((lowerCAmelCase__) , ) :Tuple = result_with_labels.to_tuple()
lowerCAmelCase__ :Tuple = model(__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase )
((lowerCAmelCase__) , ) :Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :int = XLMForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Optional[int] = model(__UpperCAmelCase )
lowerCAmelCase__ :str = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.num_labels
lowerCAmelCase__ :Any = XLMForTokenClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = self.num_choices
lowerCAmelCase__ :str = XLMForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ :Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ :Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ :Dict = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case ( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( a , a , a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :str = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
__magic_name__ :List[Any] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__magic_name__ :Any = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def snake_case ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def snake_case ( self , inputs_dict , model_class , return_labels=False ):
'''simple docstring'''
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
inputs_dict['''start_positions'''] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
inputs_dict['''end_positions'''] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def snake_case ( self ):
'''simple docstring'''
self.model_tester = XLMModelTester(self )
self.config_tester = ConfigTester(self , config_class=XLMConfig , emb_dim=3_7 )
def snake_case ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case ( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*config_and_inputs )
def snake_case ( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs )
def snake_case ( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs )
def snake_case ( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*config_and_inputs )
def snake_case ( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs )
def snake_case ( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs )
def snake_case ( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=1 ):
'''simple docstring'''
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(
[isinstance(__UpperCAmelCase , __UpperCAmelCase ) for iter_attentions in attentions] , [True] * len(__UpperCAmelCase ) )
self.assertEqual(len(__UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__UpperCAmelCase ):
# adds PAD dummy token
lowerCAmelCase__ :Tuple = min_length + idx + 1
lowerCAmelCase__ :int = min_length + idx + 1
lowerCAmelCase__ :Dict = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__UpperCAmelCase ) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=1 ):
'''simple docstring'''
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(
[isinstance(__UpperCAmelCase , __UpperCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(__UpperCAmelCase ) , )
self.assertEqual(len(__UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__UpperCAmelCase ):
# adds PAD dummy token
lowerCAmelCase__ :Tuple = min_length + idx + 1
lowerCAmelCase__ :Dict = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__UpperCAmelCase ) , )
pass
@slow
def snake_case ( self ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = XLMModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self ):
'''simple docstring'''
model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(torch_device )
input_ids = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=torch_device ) # the president
expected_output_ids = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
output_ids = model.generate(input_ids , do_sample=False )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , expected_output_ids )
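# Context for the expected ids above (an editor's sketch, not part of the
# original test): with `do_sample=False`, `generate` performs greedy decoding,
# i.e. it repeatedly appends the argmax token. For this checkpoint the greedy
# loop degenerates from the 2-token prompt into "the president" repeated ten
# times, which is exactly the reference list the assertion checks.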
| 93 |
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def lowerCamelCase_ ( *objects ) ->Optional[int]:
"""simple docstring"""
if not isinstance(objects , list ):
objects = list(objects )
for i in range(len(objects ) ):
objects[i] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
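# Illustrative usage (a hedged sketch; it assumes the function above mirrors
# `accelerate.utils.release_memory`, which nulls out every passed reference,
# runs garbage collection, and empties the XPU/NPU/CUDA cache):
#
#     model, optimizer = release_memory(model, optimizer)
#     # both names are now None and the backing accelerator memory is freed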
def should_reduce_batch_size( exception ) ->bool:
"""simple docstring"""
_statements = [
'''CUDA out of memory.''', # CUDA OOM
'''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
'''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
]
if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
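# Quick sanity check of the string-based OOM detection above (sketch): only a
# RuntimeError with a single message argument containing one of the known
# substrings counts as an out-of-memory condition.
#
#     assert should_reduce_batch_size(RuntimeError("CUDA out of memory. ..."))
#     assert not should_reduce_batch_size(ValueError("CUDA out of memory."))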
def lowerCamelCase_ ( function = None , starting_batch_size = 1_28 ):
"""simple docstring"""
if function is None:
return functools.partial(lowerCamelCase_ , starting_batch_size=starting_batch_size )
batch_size = starting_batch_size
def decorator(*args , **kwargs ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
params = list(inspect.signature(function ).parameters.keys() )
# Guard against user error
if len(params ) < (len(args ) + 1):
arg_str = ''', '''.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called. '''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
return function(batch_size , *args , **kwargs )
except Exception as e:
if should_reduce_batch_size(e ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
| 522 | 0 |
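# Usage sketch for the `find_executable_batch_size`-style decorator defined
# above (the example script that follows imports the real implementation from
# `accelerate.utils`). The wrapped function takes the batch size as its first
# parameter and is called *without* it; on each OOM-style failure the decorator
# empties the device cache and retries with half the batch size:
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # build dataloaders with `batch_size` and run one training pass
#
#     train()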
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working, simple example of using Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training; it builds on the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator , batch_size = 16 ):
"""simple docstring"""
tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
datasets = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(examples ):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
eval_dataloader = DataLoader(
tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
return train_dataloader, eval_dataloader
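# Why the collate function above pads the way it does (sketch): on TPU every
# new tensor shape triggers an XLA recompilation, so sequences are padded to a
# fixed 128; on GPU, padding to the longest sequence in the batch (rounded to
# a multiple of 8/16 so tensor cores stay efficient) is cheaper. A standalone
# equivalent with the tokenizers API would be:
#
#     batch = tokenizer.pad(features, padding="longest",
#                           pad_to_multiple_of=8, return_tensors="pt")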
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
def training_function( config , args ):
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
config['''num_epochs'''] = 2
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config['''lr''']
num_epochs = int(config['''num_epochs'''] )
seed = int(config['''seed'''] )
batch_size = int(config['''batch_size'''] )
metric = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We can now define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=batch_size )
def inner_training_loop(batch_size ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(seed )
# Instantiate the model (we build the model here so that the seed also controls new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device )
# Instantiate optimizer
optimizer = AdamW(params=model.parameters() , lr=lr )
train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer , num_warmup_steps=1_00 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# Now we train the model
for epoch in range(num_epochs ):
model.train()
for step, batch in enumerate(train_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
outputs = model(**batch )
loss = outputs.loss
accelerator.backward(loss )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(eval_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
outputs = model(**batch )
predictions = outputs.logits.argmax(dim=-1 )
predictions , references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=predictions , references=references , )
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , eval_metric )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
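# Note on the pattern above (sketch): `inner_training_loop` declares only
# `batch_size`; everything else (the `Accelerator`, config values) is captured
# through `nonlocal`/closure, and the final call passes no arguments -- the
# decorator injects the batch size and halves it after every OOM until one
# full run completes.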
def main():
"""simple docstring"""
parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
args = parser.parse_args()
config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(config , args )
if __name__ == "__main__":
main()
| 429 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase = get_logger(__name__)
class _a :
def __init__( self: Dict , cache_dir: Optional[str] = None ) -> Union[str, Any]:
"""simple docstring"""
self.extract_dir = (
os.path.join(cache_dir , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
self.extractor = Extractor
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: str ) -> str:
"""simple docstring"""
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
abs_path = os.path.abspath(UpperCamelCase_ )
return os.path.join(self.extract_dir , hash_url_to_filename(abs_path ) )
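# Cache-layout sketch implied by the two lines above (assuming
# `hash_url_to_filename` hashes the absolute input path, as in
# `datasets.utils.file_utils`): every archive is extracted into a stable
# directory such as
#
#     <cache_dir>/extracted/3b0b47e5a0f8...
#
# so repeated extractions of the same archive reuse the same output directory.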
def lowerCamelCase_ ( self: List[Any] , output_path: str , force_extract: bool ) -> bool:
"""simple docstring"""
return force_extract or (
not os.path.isfile(output_path ) and not (os.path.isdir(output_path ) and os.listdir(output_path ))
)
def lowerCamelCase_ ( self: List[Any] , input_path: str , force_extract: bool = False ) -> str:
"""simple docstring"""
extractor_format = self.extractor.infer_extractor_format(input_path )
if not extractor_format:
return input_path
output_path = self._get_output_path(input_path )
if self._do_extract(output_path , force_extract ):
self.extractor.extract(input_path , output_path , extractor_format )
return output_path
class _a ( UpperCamelCase__ ):
@classmethod
@abstractmethod
def lowerCamelCase_ ( cls: str , UpperCamelCase_: Union[Path, str] , **UpperCamelCase_: Any ) -> bool:
"""simple docstring"""
...
@staticmethod
@abstractmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
...
class _a ( UpperCamelCase__ , UpperCamelCase__ ):
_lowercase : List[bytes] = []
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: int ) -> Union[str, Any]:
"""simple docstring"""
with open(UpperCamelCase_ , '''rb''' ) as f:
return f.read(UpperCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls: int , UpperCamelCase_: Union[Path, str] , UpperCamelCase_: bytes = b"" ) -> bool:
"""simple docstring"""
if not magic_number:
lowercase__ = max(len(UpperCamelCase_ ) for cls_magic_number in cls.magic_numbers )
try:
lowercase__ = cls.read_magic_number(UpperCamelCase_ , UpperCamelCase_ )
except OSError:
return False
return any(magic_number.startswith(UpperCamelCase_ ) for cls_magic_number in cls.magic_numbers )
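# Magic-number detection in a nutshell (sketch): each extractor lists the
# leading bytes of its format and `is_extractable` compares the file's first
# bytes against them. Every gzip stream, for example, starts with 0x1F 0x8B:
#
#     import gzip, os, tempfile
#     path = os.path.join(tempfile.mkdtemp(), "sample.gz")
#     with gzip.open(path, "wb") as f:
#         f.write(b"payload")
#     with open(path, "rb") as f:
#         assert f.read(2) == b"\x1f\x8b"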
class _a ( UpperCamelCase__ ):
@classmethod
def lowerCamelCase_ ( cls: Dict , UpperCamelCase_: Union[Path, str] , **UpperCamelCase_: Dict ) -> bool:
"""simple docstring"""
return tarfile.is_tarfile(UpperCamelCase_ )
@staticmethod
def lowerCamelCase_ ( members , output_path ):
"""simple docstring"""
def resolved(path: str ) -> str:
return os.path.realpath(os.path.abspath(path ) )
def badpath(path: str , base: str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(base , path ) ).startswith(base )
def badlink(info , base: str ) -> bool:
# Links are interpreted relative to the directory containing the link
tip = resolved(os.path.join(base , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=tip )
base = resolved(output_path )
for finfo in members:
if badpath(finfo.name , base ):
logger.error(f'Extraction of {finfo.name} is blocked (illegal path)' )
elif finfo.issym() and badlink(finfo , base ):
logger.error(f'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
elif finfo.islnk() and badlink(finfo , base ):
logger.error(f'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
else:
yield finfo
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
tar_file = tarfile.open(UpperCamelCase_ )
tar_file.extractall(UpperCamelCase_ , members=TarExtractor.safemembers(tar_file , UpperCamelCase_ ) )
tar_file.close()
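# The `safemembers` filter used above defends against "tar-slip" attacks
# (sketch): a crafted archive can contain member names or link targets that
# escape the extraction directory, and such members are logged and skipped
# instead of written. For example, a member named
#
#     ../../home/user/.bashrc
#
# resolves outside the output directory and is blocked by `badpath`.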
class _a ( UpperCamelCase__ ):
_lowercase : Dict = [b'''\x1F\x8B''']
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
with gzip.open(UpperCamelCase_ , '''rb''' ) as gzip_file:
with open(UpperCamelCase_ , '''wb''' ) as extracted_file:
shutil.copyfileobj(UpperCamelCase_ , UpperCamelCase_ )
class _a ( UpperCamelCase__ ):
_lowercase : List[str] = [
b'''PK\x03\x04''',
b'''PK\x05\x06''', # empty archive
b'''PK\x07\x08''', # spanned archive
]
@classmethod
def lowerCamelCase_ ( cls: str , UpperCamelCase_: Union[Path, str] , UpperCamelCase_: bytes = b"" ) -> bool:
"""simple docstring"""
if super().is_extractable(UpperCamelCase_ , magic_number=UpperCamelCase_ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(path , '''rb''' ) as fp:
endrec = _EndRecData(fp )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
data = fp.read(sizeCentralDir ) # CD is where we expect it to be
if len(data ) == sizeCentralDir:
centdir = struct.unpack(structCentralDir , data ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
with zipfile.ZipFile(UpperCamelCase_ , '''r''' ) as zip_file:
zip_file.extractall(UpperCamelCase_ )
zip_file.close()
class _a ( UpperCamelCase__ ):
_lowercase : int = [b'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
with lzma.open(UpperCamelCase_ ) as compressed_file:
with open(UpperCamelCase_ , '''wb''' ) as extracted_file:
shutil.copyfileobj(UpperCamelCase_ , UpperCamelCase_ )
class _a ( UpperCamelCase__ ):
_lowercase : Union[str, Any] = [b'''Rar!\x1a\x07\x00''', b'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
rf = rarfile.RarFile(UpperCamelCase_ )
rf.extractall(UpperCamelCase_ )
rf.close()
class _a ( UpperCamelCase__ ):
_lowercase : List[Any] = [b'''\x28\xb5\x2F\xFD''']
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
dctx = zstd.ZstdDecompressor()
with open(UpperCamelCase_ , '''rb''' ) as ifh, open(UpperCamelCase_ , '''wb''' ) as ofh:
dctx.copy_stream(UpperCamelCase_ , UpperCamelCase_ )
class _a ( UpperCamelCase__ ):
_lowercase : List[Any] = [b'''\x42\x5A\x68''']
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
with bz2.open(UpperCamelCase_ , '''rb''' ) as compressed_file:
with open(UpperCamelCase_ , '''wb''' ) as extracted_file:
shutil.copyfileobj(UpperCamelCase_ , UpperCamelCase_ )
class _a ( UpperCamelCase__ ):
_lowercase : str = [b'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
import py7zr
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
with py7zr.SevenZipFile(UpperCamelCase_ , '''r''' ) as archive:
archive.extractall(UpperCamelCase_ )
class _a ( UpperCamelCase__ ):
_lowercase : str = [b'''\x04\x22\x4D\x18''']
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
import lz4.frame
with lz4.frame.open(UpperCamelCase_ , '''rb''' ) as compressed_file:
with open(UpperCamelCase_ , '''wb''' ) as extracted_file:
shutil.copyfileobj(UpperCamelCase_ , UpperCamelCase_ )
class _a :
# Keep zip last, because files of other formats (e.g. tar or gzip archives) can be wrongly detected as zip
_lowercase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def lowerCamelCase_ ( cls: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return max(
len(extractor_magic_number )
for extractor in cls.extractors.values()
if issubclass(extractor , MagicNumberBaseExtractor )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: int ) -> Optional[Any]:
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase_ , magic_number_length=UpperCamelCase_ )
except OSError:
return b""
@classmethod
def lowerCamelCase_ ( cls: List[Any] , UpperCamelCase_: Union[Path, str] , UpperCamelCase_: bool = False ) -> bool:
"""simple docstring"""
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=UpperCamelCase_ , )
extractor_format = cls.infer_extractor_format(UpperCamelCase_ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def lowerCamelCase_ ( cls: List[Any] , UpperCamelCase_: Union[Path, str] ) -> str: # <Added version="2.4.0"/>
"""simple docstring"""
magic_number_max_length = cls._get_magic_number_max_length()
magic_number = cls._read_magic_number(UpperCamelCase_ , magic_number_max_length )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(UpperCamelCase_ , magic_number=magic_number ):
return extractor_format
@classmethod
def lowerCamelCase_ ( cls: Optional[int] , UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[BaseExtractor] = "deprecated" , ) -> None:
"""simple docstring"""
os.makedirs(os.path.dirname(UpperCamelCase_ ) , exist_ok=UpperCamelCase_ )
# Prevent parallel extractions
lowercase__ = str(Path(UpperCamelCase_ ).with_suffix('''.lock''' ) )
with FileLock(UpperCamelCase_ ):
shutil.rmtree(UpperCamelCase_ , ignore_errors=UpperCamelCase_ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(UpperCamelCase_ , UpperCamelCase_ ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=UpperCamelCase_ , )
lowercase__ = extractor if extractor != '''deprecated''' else extractor_format
else:
lowercase__ = cls.extractors[extractor_format]
return extractor.extract(UpperCamelCase_ , UpperCamelCase_ )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=UpperCamelCase_ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(UpperCamelCase_ ):
return extractor.extract(UpperCamelCase_ , UpperCamelCase_ )
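# End-to-end sketch of the dispatcher above (assuming it mirrors
# `datasets.utils.extract.Extractor`): infer the format from the file's magic
# number, then route to the matching extractor class.
#
#     fmt = Extractor.infer_extractor_format("archive.tar.gz")   # -> "gzip"
#     Extractor.extract("archive.tar.gz", "out_dir", extractor_format=fmt)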
| 429 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , ) -> Any:
"""simple docstring"""
size = size if size is not None else {"""height""": 18, """width""": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
[-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCAmelCase__ ( a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = ImageGPTImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : str ) -> Dict:
"""simple docstring"""
self.image_processor_tester = ImageGPTImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self : int ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """clusters""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """size""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_normalize""" ) )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
image_processor = self.image_processing_class(**self.image_processor_dict )
obj = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(__SCREAMING_SNAKE_CASE , obj[key] ) )
else:
self.assertEqual(obj[key] , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
image_processor_first = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
json_file_path = os.path.join(tmpdirname , """image_processor.json""" )
image_processor_first.to_json_file(json_file_path )
image_processor_second = self.image_processing_class.from_json_file(json_file_path ).to_dict()
image_processor_first = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(__SCREAMING_SNAKE_CASE , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
image_processor_first = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(tmpdirname )
image_processor_second = self.image_processing_class.from_pretrained(tmpdirname ).to_dict()
image_processor_first = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(__SCREAMING_SNAKE_CASE , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , __SCREAMING_SNAKE_CASE )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
pass
def a__ ( ):
"""simple docstring"""
dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
imagea = Image.open(dataset[4]["""file"""] )
imageb = Image.open(dataset[5]["""file"""] )
images = [imagea, imageb]
return images
@require_vision
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
image_processing = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
images = prepare_images()
# test non-batched
encoding = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_024) )
expected_ids = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )
# test batched
encoding = image_processing(images , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_024) )
expected_ids = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
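# Background for the id sequences above (sketch): ImageGPT has no patch or
# pixel embeddings; its image processor resizes every image to 32 x 32 and
# maps each normalized RGB pixel to the index of the nearest color cluster,
# so an image becomes 32 * 32 = 1_024 cluster ids -- the (batch, 1_024) shape
# the assertions check. A toy version of that quantization step:
#
#     import numpy as np
#     clusters = np.asarray([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])  # 2-color palette
#     pixel = np.asarray([0.9, 0.8, 1.0])
#     token = int(np.argmin(((clusters - pixel) ** 2).sum(-1)))  # -> 1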
| 627 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , scope=None , range_bbox=1_000 , ) -> int:
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
self.range_bbox = range_bbox
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
t = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
t = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = t
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
config = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
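# The coordinate-swap loop above normalizes each random box to the layout
# convention x0 <= x1 and y0 <= y1. A vectorized sketch of the same fix
# (an editor's illustration, not used by the tester itself):
#
#     import torch
#     x_sorted = torch.sort(bbox[..., [0, 2]], dim=-1).values
#     y_sorted = torch.sort(bbox[..., [1, 3]], dim=-1).values
#     bbox = torch.stack([x_sorted[..., 0], y_sorted[..., 0],
#                         x_sorted[..., 1], y_sorted[..., 1]], dim=-1)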
def UpperCAmelCase__ ( self : str ) -> int:
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = LiltModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = LiltForTokenClassification(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = LiltForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( a , a , a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int ) -> str:
"""simple docstring"""
return True
def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
self.model_tester = LiltModelTester(self )
self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
def UpperCAmelCase__ ( self : int ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
def UpperCAmelCase__ ( self : Any ) -> List[str]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
def UpperCAmelCase__ ( self : Dict ) -> int:
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = LiltModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.tensor([[1, 2]] , device=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(input_ids=__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.Size([1, 2, 768] )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=__SCREAMING_SNAKE_CASE , )
self.assertTrue(outputs.last_hidden_state.shape , __SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
| 627 | 1 |
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
__UpperCAmelCase =False
try:
__UpperCAmelCase =_is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class lowerCAmelCase__ :
def __init__( self , prompt = None , choices = [] ):
'''simple docstring'''
self.position = 0
self.choices = choices
self.prompt = prompt
if sys.platform == "win32":
self.arrow_char = "*"
else:
self.arrow_char = "➔ "
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = "" ):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , UpperCamelCase__ )
else:
forceWrite(self.choices[index] , UpperCamelCase__ )
def lowercase_ ( self , index ):
'''simple docstring'''
if index == self.position:
forceWrite(f""" {self.arrow_char} """ )
self.write_choice(index )
else:
forceWrite(f""" {self.choices[index]}""" )
reset_cursor()
def lowercase_ ( self , direction , num_spaces = 1 ):
'''simple docstring'''
old_position = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(old_position )
move_cursor(num_spaces , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def lowercase_ ( self ):
'''simple docstring'''
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def lowercase_ ( self ):
'''simple docstring'''
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def lowercase_ ( self ):
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def lowercase_ ( self ):
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
def lowercase_ ( self ):
'''simple docstring'''
index = int(chr(self.current_selection ) )
movement = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , movement )
else:
return
else:
return
def lowercase_ ( self , default_choice = 0 ):
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
self.position = default_choice
for i in range(len(self.choices ) ):
self.print_choice(i )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
choice = int(builtins.input() )
except ValueError:
choice = default_choice
else:
choice = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(UpperCamelCase__ , "\n" )
return choice
| 261 |
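# Illustrative usage of the menu class defined above (sketch; it assumes the
# class mirrors accelerate's `BulletMenu`, which the `@input.register`
# decorator wires into the key-handling loop):
#
#     menu = BulletMenu("Which mixed precision?", ["no", "fp16", "bf16"])
#     index = menu.run(default_choice=0)   # returns the selected index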
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
__UpperCAmelCase =re.compile(r"""([A-Z]+)([A-Z][a-z])""")
__UpperCAmelCase =re.compile(r"""([a-z\d])([A-Z])""")
__UpperCAmelCase =re.compile(r"""(?<!_)_(?!_)""")
__UpperCAmelCase =re.compile(r"""(_{2,})""")
__UpperCAmelCase =r"""^\w+(\.\w+)*$"""
__UpperCAmelCase =r"""<>:/\|?*"""
def camelcase_to_snakecase( name ) -> str:
'''simple docstring'''
name = _uppercase_uppercase_re.sub(R"\1_\2" , name )
name = _lowercase_uppercase_re.sub(R"\1_\2" , name )
return name.lower()
def snakecase_to_camelcase( name ) -> str:
'''simple docstring'''
name = _single_underscore_re.split(name )
name = [_multiple_underscores_re.split(n ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != "" )
def filename_prefix_for_name( name ) -> str:
'''simple docstring'''
if os.path.basename(name ) != name:
raise ValueError(f"""Should be a dataset name, not a path: {name}""" )
return camelcase_to_snakecase(name )
def filename_prefix_for_split( name , split ) -> str:
'''simple docstring'''
if os.path.basename(name ) != name:
raise ValueError(f"""Should be a dataset name, not a path: {name}""" )
if not re.match(_split_re , split ):
raise ValueError(f"""Split name should match '{_split_re}' but got '{split}'.""" )
return f"""{filename_prefix_for_name(name )}-{split}"""
def filepattern_for_dataset_split( dataset_name , split , data_dir , filetype_suffix=None ) -> str:
'''simple docstring'''
prefix = filename_prefix_for_split(dataset_name , split )
if filetype_suffix:
prefix += f""".{filetype_suffix}"""
filepath = os.path.join(data_dir , prefix )
return f"""{filepath}*"""
def filenames_for_dataset_split( path , dataset_name , split , filetype_suffix=None , shard_lengths=None ) -> List[Any]:
'''simple docstring'''
prefix = filename_prefix_for_split(dataset_name , split )
prefix = os.path.join(path , prefix )
if shard_lengths:
num_shards = len(shard_lengths )
filenames = [f"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(num_shards )]
if filetype_suffix:
filenames = [filename + f""".{filetype_suffix}""" for filename in filenames]
return filenames
else:
filename = prefix
if filetype_suffix:
filename += f""".{filetype_suffix}"""
return [filename] | 261 | 1 |
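A quick sanity check of the helpers above; the `datasets.naming` import path is an assumption based on where these utilities live in the datasets library:

from datasets.naming import camelcase_to_snakecase, filenames_for_dataset_split  # assumed path

assert camelcase_to_snakecase("SquadV2") == "squad_v2"
# Three shards yield three deterministic, zero-padded filenames:
print(filenames_for_dataset_split("/cache", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 100, 50]))
# ['/cache/squad-train-00000-of-00003.arrow', '/cache/squad-train-00001-of-00003.arrow', '/cache/squad-train-00002-of-00003.arrow']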
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_lowerCAmelCase = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 180 |
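The point of the `_LazyModule` indirection above is that importing the package stays cheap; the heavy submodule import is deferred until an attribute is first touched. A hedged illustration, assuming a standard transformers install:

import transformers.models.herbert as herbert

# No tokenizer code has been imported yet; attribute access triggers the real import.
tokenizer_cls = herbert.HerbertTokenizer
print(tokenizer_cls.__name__)  # -> "HerbertTokenizer"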
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LEDTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 180 | 1 |
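Outside the test harness, the global-attention padding behavior checked above looks like this (a sketch assuming the `allenai/led-base-16384` checkpoint is reachable):

from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
enc = tokenizer(["Summary of the text.", "Another summary."], padding=False)
enc["global_attention_mask"] = [[0] * len(ids) for ids in enc["input_ids"]]
padded = tokenizer.pad(enc)  # pads input_ids, attention_mask and global_attention_mask together
print(padded["global_attention_mask"])  # the shorter sequence is padded out with -1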
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 503 |
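The same deprecation-shim pattern, reduced to its essentials with illustrative names (not transformers classes):

import warnings


class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size


class OldFeatureExtractor(NewImageProcessor):
    """Kept only so existing imports keep working; warns users toward the replacement."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)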
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt],
            image=2 * [low_res_image],
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_images_per_prompt=2,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            num_inference_steps=5,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 503 | 1 |
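For reference, the pipeline exercised by these tests in ordinary use (a sketch: assumes a CUDA device and that the checkpoint download succeeds):

import torch
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image

pipe = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
).to("cuda")
low_res = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale/low_res_cat.png"
)
upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]
upscaled.save("upscaled_cat.png")  # 4x the input resolution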
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate a Quantum Fourier Transform circuit on `number_of_qubits` qubits."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
| 59 |
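Because the QFT of the all-zeros input state is a uniform superposition, the simulator output can be sanity-checked directly (assumes a working qiskit/Aer install):

counts = quantum_fourier_transform(3)
assert sum(counts.values()) == 10000
# Each of the 2**3 = 8 outcomes should appear roughly 1250 times.
print(sorted(counts.items()))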
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 160 | 0 |
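Scrapers like this are brittle: the CSS class is tied to Yahoo's current markup, and `soup.find` returns `None` once it changes. A defensive wrapper (illustrative only):

def safe_stock_price(symbol: str, default: str = "N/A") -> str:
    try:
        return stock_price(symbol)
    except AttributeError:  # .find() returned None because the page layout changed
        return default


print(safe_stock_price("AAPL"))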
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
_lowerCAmelCase = "http://www.mocksite.com/file1.txt"
_lowerCAmelCase = "\"text\": [\"foo\", \"foo\"]"
_lowerCAmelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class __A :
"""simple docstring"""
A_ = 2_0_0
A_ = {'Content-Length': '100'}
A_ = {}
def snake_case_( self , **_lowerCamelCase )-> Optional[Any]:
return [bytes(_lowerCamelCase , '''utf-8''' )]
def _lowerCAmelCase ( *lowercase : str , **lowercase : Dict ) ->Union[str, Any]:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize('''urls_type''' , [str, list, dict] )
def _lowerCAmelCase ( lowercase : Dict , lowercase : Any , lowercase : List[Any] ) ->Optional[Any]:
"""simple docstring"""
import requests
monkeypatch.setattr(lowercase , '''request''' , lowercase )
lowercase__ = URL
if issubclass(lowercase , lowercase ):
lowercase__ = url
elif issubclass(lowercase , lowercase ):
lowercase__ = [url]
elif issubclass(lowercase , lowercase ):
lowercase__ = {'''train''': url}
lowercase__ = '''dummy'''
lowercase__ = '''downloads'''
lowercase__ = tmp_path
lowercase__ = DownloadConfig(
cache_dir=os.path.join(lowercase , lowercase ) , use_etag=lowercase , )
lowercase__ = DownloadManager(dataset_name=lowercase , download_config=lowercase )
lowercase__ = dl_manager.download(lowercase )
lowercase__ = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowercase , lowercase ):
lowercase__ = [downloaded_paths]
lowercase__ = [urls]
elif isinstance(lowercase , lowercase ):
assert "train" in downloaded_paths.keys()
lowercase__ = downloaded_paths.values()
lowercase__ = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowercase , lowercase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowercase__ = Path(lowercase )
lowercase__ = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowercase__ = downloaded_path.read_text()
assert content == CONTENT
lowercase__ = downloaded_path.with_suffix('''.json''' )
assert metadata_downloaded_path.exists()
lowercase__ = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''' , [str, list, dict] )
def _lowerCAmelCase ( lowercase : int , lowercase : Tuple , lowercase : List[Any] ) ->Optional[int]:
"""simple docstring"""
lowercase__ = str(lowercase )
if issubclass(lowercase , lowercase ):
lowercase__ = filename
elif issubclass(lowercase , lowercase ):
lowercase__ = [filename]
elif issubclass(lowercase , lowercase ):
lowercase__ = {'''train''': filename}
lowercase__ = '''dummy'''
lowercase__ = xz_file.parent
lowercase__ = '''extracted'''
lowercase__ = DownloadConfig(
cache_dir=lowercase , use_etag=lowercase , )
lowercase__ = DownloadManager(dataset_name=lowercase , download_config=lowercase )
lowercase__ = dl_manager.extract(lowercase )
lowercase__ = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowercase , lowercase ):
lowercase__ = [extracted_paths]
lowercase__ = [paths]
elif isinstance(lowercase , lowercase ):
assert "train" in extracted_paths.keys()
lowercase__ = extracted_paths.values()
lowercase__ = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowercase , lowercase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowercase__ = Path(lowercase )
lowercase__ = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowercase , etag=lowercase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowercase__ = extracted_path.read_text()
lowercase__ = text_file.read_text()
assert extracted_file_content == expected_file_content
def _lowerCAmelCase ( lowercase : Any , lowercase : int ) ->Dict:
"""simple docstring"""
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(lowercase , start=1 ):
lowercase__ = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def _lowerCAmelCase ( lowercase : int , lowercase : Optional[int] ) ->List[str]:
"""simple docstring"""
lowercase__ = request.getfixturevalue(lowercase )
lowercase__ = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowercase ) , start=1 ):
_test_jsonl(lowercase , lowercase )
assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def _lowerCAmelCase ( lowercase : Optional[Any] , lowercase : Optional[int] ) ->List[Any]:
"""simple docstring"""
lowercase__ = request.getfixturevalue(lowercase )
lowercase__ = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowercase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowercase ) , start=1 ):
_test_jsonl(lowercase , lowercase )
assert num_tar == 1
assert num_jsonl == 2
def _lowerCAmelCase ( lowercase : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
lowercase__ = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowercase ) , start=1 ):
assert os.path.basename(lowercase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 721 |
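The behavior under test, outside pytest — a sketch using the public datasets API (assumes network access; the README URL is only an example):

from datasets import DownloadConfig, DownloadManager

dl_manager = DownloadManager(
    dataset_name="dummy", download_config=DownloadConfig(cache_dir="./downloads")
)
local_path = dl_manager.download(
    "https://raw.githubusercontent.com/huggingface/datasets/main/README.md"
)
print(local_path)  # deterministic cache path derived from the URL hash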
_lowerCAmelCase = "Input must be a string of 8 numbers plus letter"
_lowerCAmelCase = "TRWAGMYFPDXBNJZSQVHLCKE"
def _lowerCAmelCase ( lowercase : str ) ->bool:
"""simple docstring"""
if not isinstance(lowercase , lowercase ):
lowercase__ = F'''Expected string as input, found {type(lowercase ).__name__}'''
raise TypeError(lowercase )
lowercase__ = spanish_id.replace('''-''' , '''''' ).upper()
if len(lowercase ) != 9:
raise ValueError(lowercase )
try:
lowercase__ = int(spanish_id_clean[0:8] )
lowercase__ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(lowercase ) from ex
if letter.isdigit():
raise ValueError(lowercase )
return letter == LOOKUP_LETTERS[number % 2_3]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318 | 0 |
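The checksum rule being validated: the control letter is LOOKUP_LETTERS[number % 23]. A worked example:

# 12345678 % 23 == 14, and LOOKUP_LETTERS[14] == "Z"
assert is_spain_national_id("12345678Z")
assert not is_spain_national_id("12345678T")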