code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['LayoutLMv2FeatureExtractor']
UpperCAmelCase = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 433 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = "switch_transformers"
UpperCAmelCase : Union[str, Any] = ["past_key_values"]
UpperCAmelCase : int = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , A_=3_2128 , A_=768 , A_=64 , A_=2048 , A_=64 , A_=12 , A_=3 , A_=12 , A_=3 , A_=12 , A_=8 , A_=False , A_=0.0_1 , A_="float32" , A_=False , A_=32 , A_=128 , A_=0.1 , A_=1e-6 , A_=0.0_0_1 , A_=0.0_0_1 , A_=1.0 , A_="relu" , A_=True , A_=False , A_=True , A_=0 , A_=1 , **A_ , ) -> int:
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = d_kv
lowerCAmelCase = d_ff
lowerCAmelCase = num_sparse_encoder_layers
lowerCAmelCase = num_layers
lowerCAmelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCAmelCase = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
lowerCAmelCase = self.num_layers // self.num_sparse_encoder_layers
else:
lowerCAmelCase = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
lowerCAmelCase = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
lowerCAmelCase = self.num_decoder_layers # HACK: this will create 0 sparse layers
lowerCAmelCase = num_heads
lowerCAmelCase = num_experts
lowerCAmelCase = expert_capacity
lowerCAmelCase = router_bias
lowerCAmelCase = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
lowerCAmelCase = router_dtype
lowerCAmelCase = router_ignore_padding_tokens
lowerCAmelCase = relative_attention_num_buckets
lowerCAmelCase = relative_attention_max_distance
lowerCAmelCase = dropout_rate
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_factor
lowerCAmelCase = feed_forward_proj
lowerCAmelCase = use_cache
lowerCAmelCase = add_router_probs
lowerCAmelCase = router_z_loss_coef
lowerCAmelCase = router_aux_loss_coef
lowerCAmelCase = self.feed_forward_proj.split("""-""" )
lowerCAmelCase = act_info[-1]
lowerCAmelCase = act_info[0] == """gated"""
if len(A_ ) > 1 and act_info[0] != "gated" or len(A_ ) > 2:
raise ValueError(
f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowerCAmelCase = """gelu_new"""
super().__init__(
pad_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , **A_ , ) | 433 | 1 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class lowerCAmelCase__ ( UpperCamelCase ):
def __init__( self : Any , _A : str="" , _A : Union[str, Any]="train"):
assert os.path.isdir(_A)
A__ : Optional[Any] = []
A__ : Dict = os.listdir(_A)
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
A__ : Dict = os.path.join(_A , _A)
if not os.path.isfile(_A):
continue
self.documents.append(_A)
def __len__( self : List[Any]):
return len(self.documents)
def __getitem__( self : Dict , _A : List[Any]):
A__ : int = self.documents[idx]
A__ : str = document_path.split("/")[-1]
with open(_A , encoding="utf-8") as source:
A__ : Dict = source.read()
A__ , A__ : Optional[int] = process_story(_A)
return document_name, story_lines, summary_lines
def snake_case__ ( __lowercase ) -> Optional[Any]:
"""simple docstring"""
A__ : List[str] = list(filter(lambda __lowercase : len(__lowercase ) != 0 , [line.strip() for line in raw_story.split("\n" )] ) )
# for some unknown reason some lines miss a period, add it
A__ : List[str] = [_add_missing_period(__lowercase ) for line in nonempty_lines]
# gather article lines
A__ : Dict = []
A__ : List[str] = deque(__lowercase )
while True:
try:
A__ : List[str] = lines.popleft()
if element.startswith("@highlight" ):
break
story_lines.append(__lowercase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
A__ : List[str] = list(filter(lambda __lowercase : not t.startswith("@highlight" ) , __lowercase ) )
return story_lines, summary_lines
def snake_case__ ( __lowercase ) -> Any:
"""simple docstring"""
A__ : Optional[Any] = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"]
if line.startswith("@highlight" ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def snake_case__ ( __lowercase , __lowercase , __lowercase ) -> int:
"""simple docstring"""
if len(__lowercase ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(__lowercase )) )
return sequence
def snake_case__ ( __lowercase , __lowercase ) -> List[str]:
"""simple docstring"""
A__ : Any = torch.ones_like(__lowercase )
A__ : str = sequence == pad_token_id
A__ : Any = 0
return mask
def snake_case__ ( __lowercase , __lowercase , __lowercase ) -> int:
"""simple docstring"""
A__ : Any = [tokenizer.encode(__lowercase ) for line in story_lines]
A__ : str = [token for sentence in story_lines_token_ids for token in sentence]
A__ : Union[str, Any] = [tokenizer.encode(__lowercase ) for line in summary_lines]
A__ : Dict = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def snake_case__ ( __lowercase , __lowercase ) -> str:
"""simple docstring"""
A__ : int = []
for sequence in batch:
A__ : Optional[int] = -1
A__ : Union[str, Any] = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(__lowercase )
return torch.tensor(__lowercase ) | 182 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( __lowercase , __lowercase , __lowercase ) -> List[Any]:
"""simple docstring"""
def get_masked_lm_array(__lowercase ):
A__ : int = F'masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'
A__ : Dict = tf.train.load_variable(__lowercase , __lowercase )
if "kernel" in name:
A__ : Any = array.transpose()
return torch.from_numpy(__lowercase )
def get_encoder_array(__lowercase ):
A__ : Dict = F'encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'
A__ : Tuple = tf.train.load_variable(__lowercase , __lowercase )
if "kernel" in name:
A__ : str = array.transpose()
return torch.from_numpy(__lowercase )
def get_encoder_layer_array(__lowercase , __lowercase ):
A__ : int = F'encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'
A__ : Dict = tf.train.load_variable(__lowercase , __lowercase )
if "kernel" in name:
A__ : Any = array.transpose()
return torch.from_numpy(__lowercase )
def get_encoder_attention_layer_array(__lowercase , __lowercase , __lowercase ):
A__ : List[str] = F'encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'
A__ : List[Any] = tf.train.load_variable(__lowercase , __lowercase )
A__ : int = array.reshape(__lowercase )
if "kernel" in name:
A__ : Union[str, Any] = array.transpose()
return torch.from_numpy(__lowercase )
print(F'Loading model based on config from {config_path}...' )
A__ : Tuple = BertConfig.from_json_file(__lowercase )
A__ : List[str] = BertForMaskedLM(__lowercase )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
A__ : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
A__ : BertSelfAttention = layer.attention.self
A__ : Optional[Any] = get_encoder_attention_layer_array(
__lowercase , "_query_dense/kernel" , self_attn.query.weight.data.shape )
A__ : List[Any] = get_encoder_attention_layer_array(
__lowercase , "_query_dense/bias" , self_attn.query.bias.data.shape )
A__ : int = get_encoder_attention_layer_array(
__lowercase , "_key_dense/kernel" , self_attn.key.weight.data.shape )
A__ : Tuple = get_encoder_attention_layer_array(
__lowercase , "_key_dense/bias" , self_attn.key.bias.data.shape )
A__ : List[Any] = get_encoder_attention_layer_array(
__lowercase , "_value_dense/kernel" , self_attn.value.weight.data.shape )
A__ : Optional[Any] = get_encoder_attention_layer_array(
__lowercase , "_value_dense/bias" , self_attn.value.bias.data.shape )
# Self-attention Output
A__ : BertSelfOutput = layer.attention.output
A__ : Tuple = get_encoder_attention_layer_array(
__lowercase , "_output_dense/kernel" , self_output.dense.weight.data.shape )
A__ : Any = get_encoder_attention_layer_array(
__lowercase , "_output_dense/bias" , self_output.dense.bias.data.shape )
A__ : Dict = get_encoder_layer_array(__lowercase , "_attention_layer_norm/gamma" )
A__ : Optional[int] = get_encoder_layer_array(__lowercase , "_attention_layer_norm/beta" )
# Intermediate
A__ : BertIntermediate = layer.intermediate
A__ : Optional[Any] = get_encoder_layer_array(__lowercase , "_intermediate_dense/kernel" )
A__ : Any = get_encoder_layer_array(__lowercase , "_intermediate_dense/bias" )
# Output
A__ : BertOutput = layer.output
A__ : Dict = get_encoder_layer_array(__lowercase , "_output_dense/kernel" )
A__ : List[Any] = get_encoder_layer_array(__lowercase , "_output_dense/bias" )
A__ : Optional[Any] = get_encoder_layer_array(__lowercase , "_output_layer_norm/gamma" )
A__ : Dict = get_encoder_layer_array(__lowercase , "_output_layer_norm/beta" )
# Embeddings
A__ : str = get_encoder_array("_position_embedding_layer/embeddings" )
A__ : List[str] = get_encoder_array("_type_embedding_layer/embeddings" )
A__ : List[Any] = get_encoder_array("_embedding_norm_layer/gamma" )
A__ : Optional[int] = get_encoder_array("_embedding_norm_layer/beta" )
# LM Head
A__ : Optional[Any] = model.cls.predictions.transform
A__ : int = get_masked_lm_array("dense/kernel" )
A__ : Tuple = get_masked_lm_array("dense/bias" )
A__ : Optional[Any] = get_masked_lm_array("layer_norm/gamma" )
A__ : Tuple = get_masked_lm_array("layer_norm/beta" )
A__ : Any = get_masked_lm_array("embedding_table" )
# Pooling
A__ : Optional[Any] = BertPooler(config=__lowercase )
A__ : BertPooler = get_encoder_array("_pooler_layer/kernel" )
A__ : BertPooler = get_encoder_array("_pooler_layer/bias" )
# Export final model
model.save_pretrained(__lowercase )
# Integration test - should load without any errors ;)
A__ : Union[str, Any] = BertForMaskedLM.from_pretrained(__lowercase )
print(new_model.eval() )
print("Model conversion was done sucessfully!" )
if __name__ == "__main__":
snake_case : str = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model.',
)
snake_case : Dict = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 182 | 1 |
import sys
def _lowercase ( __UpperCamelCase : Optional[Any] ):
snake_case__ = len(__UpperCamelCase )
snake_case__ = [[0 for x in range(__UpperCamelCase )] for x in range(__UpperCamelCase )]
snake_case__ = [[0 for x in range(__UpperCamelCase )] for x in range(__UpperCamelCase )]
for chain_length in range(2 , __UpperCamelCase ):
for a in range(1 , n - chain_length + 1 ):
snake_case__ = a + chain_length - 1
snake_case__ = sys.maxsize
for c in range(__UpperCamelCase , __UpperCamelCase ):
snake_case__ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
snake_case__ = cost
snake_case__ = c
return matrix, sol
def _lowercase ( __UpperCamelCase : int , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ):
if i == j:
print("""A""" + str(__UpperCamelCase ) , end=""" """ )
else:
print("""(""" , end=""" """ )
print_optiomal_solution(__UpperCamelCase , __UpperCamelCase , optimal_solution[i][j] )
print_optiomal_solution(__UpperCamelCase , optimal_solution[i][j] + 1 , __UpperCamelCase )
print(""")""" , end=""" """ )
def _lowercase ( ):
snake_case__ = [30, 35, 15, 5, 10, 20, 25]
snake_case__ = len(__UpperCamelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
snake_case__ , snake_case__ = matrix_chain_order(__UpperCamelCase )
print("""No. of Operation required: """ + str(matrix[1][n - 1] ) )
print_optiomal_solution(__UpperCamelCase , 1 , n - 1 )
if __name__ == "__main__":
main()
| 214 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Optional[int] = '''▁'''
lowerCAmelCase : Tuple = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCAmelCase : int = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
lowerCAmelCase : str = {
'''facebook/mbart-large-50-one-to-many-mmt''': 1024,
}
# fmt: off
lowerCAmelCase : Any = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class SCREAMING_SNAKE_CASE__ ( __a ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = VOCAB_FILES_NAMES
UpperCamelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : int = ['''input_ids''', '''attention_mask''']
UpperCamelCase__ : List[int] = []
UpperCamelCase__ : List[int] = []
def __init__( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : str="</s>" , lowerCAmelCase__ : Dict="</s>" , lowerCAmelCase__ : Any="<s>" , lowerCAmelCase__ : Optional[int]="<unk>" , lowerCAmelCase__ : List[str]="<pad>" , lowerCAmelCase__ : int="<mask>" , lowerCAmelCase__ : Optional[Dict[str, Any]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
snake_case__ = {} if sp_model_kwargs is None else sp_model_kwargs
snake_case__ = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase__ ) )
snake_case__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case__ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case__ = 1
snake_case__ = len(self.sp_model )
snake_case__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase__ )
}
snake_case__ = {v: k for k, v in self.lang_code_to_id.items()}
snake_case__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
snake_case__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
snake_case__ = src_lang if src_lang is not None else """en_XX"""
snake_case__ = self.lang_code_to_id[self._src_lang]
snake_case__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCAmelCase_ ( self : Any ) -> int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCAmelCase_ ( self : List[Any] ) -> str:
return self._src_lang
@src_lang.setter
def UpperCAmelCase_ ( self : Optional[Any] , lowerCAmelCase__ : str ) -> None:
snake_case__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Union[str, Any] ) -> Dict:
snake_case__ = self.__dict__.copy()
snake_case__ = None
return state
def __setstate__( self : int , lowerCAmelCase__ : Dict ) -> None:
snake_case__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case__ = {}
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase_ ( self : int ) -> Dict:
snake_case__ = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase_ ( self : int , lowerCAmelCase__ : str ) -> List[str]:
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] , lowerCAmelCase__ : str ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case__ = self.sp_model.PieceToId(lowerCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase_ ( self : str , lowerCAmelCase__ : int ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase_ ( self : Tuple , lowerCAmelCase__ : Any ) -> Any:
snake_case__ = []
snake_case__ = """"""
snake_case__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase__ ) + token
snake_case__ = True
snake_case__ = []
else:
current_sub_tokens.append(lowerCAmelCase__ )
snake_case__ = False
out_string += self.sp_model.decode(lowerCAmelCase__ )
return out_string.strip()
def UpperCAmelCase_ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , """wb""" ) as fi:
snake_case__ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
def UpperCAmelCase_ ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
snake_case__ = [1] * len(self.prefix_tokens )
snake_case__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + ([0] * len(lowerCAmelCase__ )) + suffix_ones
def UpperCAmelCase_ ( self : int , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCAmelCase_ ( self : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] , lowerCAmelCase__ : Optional[str] , **lowerCAmelCase__ : Union[str, Any] ) -> Any:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
snake_case__ = src_lang
snake_case__ = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case__ = self.convert_tokens_to_ids(lowerCAmelCase__ )
snake_case__ = tgt_lang_id
return inputs
def UpperCAmelCase_ ( self : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str = "en_XX" , lowerCAmelCase__ : Optional[List[str]] = None , lowerCAmelCase__ : str = "ro_RO" , **lowerCAmelCase__ : Dict , ) -> BatchEncoding:
snake_case__ = src_lang
snake_case__ = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCAmelCase_ ( self : Tuple , lowerCAmelCase__ : str ) -> None:
snake_case__ = self.lang_code_to_id[src_lang]
snake_case__ = [self.cur_lang_code_id]
snake_case__ = [self.eos_token_id]
def UpperCAmelCase_ ( self : str , lowerCAmelCase__ : str ) -> None:
snake_case__ = self.lang_code_to_id[tgt_lang]
snake_case__ = [self.cur_lang_code_id]
snake_case__ = [self.eos_token_id]
| 214 | 1 |
import math
from datetime import datetime, timedelta
def lowerCAmelCase_ ( __UpperCAmelCase: int ) -> datetime:
UpperCamelCase__ : int = year % 19
UpperCamelCase__ : List[str] = year % 4
UpperCamelCase__ : str = year % 7
UpperCamelCase__ : Union[str, Any] = math.floor(year / 100 )
UpperCamelCase__ : Union[str, Any] = math.floor((13 + 8 * leap_day_inhibits) / 25 )
UpperCamelCase__ : Tuple = leap_day_inhibits / 4
UpperCamelCase__ : str = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
UpperCamelCase__ : str = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
UpperCamelCase__ : Any = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
UpperCamelCase__ : Dict = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(__UpperCAmelCase , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(__UpperCAmelCase , 4 , 18 )
else:
return datetime(__UpperCAmelCase , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
UpperCAmelCase_ = 'will be' if year > datetime.now().year else 'was'
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
| 369 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCAmelCase_ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def lowerCAmelCase_ ( state_dict , old , new ) -> None:
    """Rename one key of *state_dict* in place: pop ``old`` and re-insert its value under ``new``.

    The original signature repeated one parameter name three times (a SyntaxError)
    and the popped value was never written back; restored to the standard
    ``rename_key`` helper used by the checkpoint-conversion scripts.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def lowerCAmelCase_ ( state_dict ) -> "OrderedDict":
    """Return a copy of *state_dict* with backbone keys remapped to the HF layout.

    Every key containing ``backbone.0.body`` (original DETR naming) is rewritten
    to ``backbone.conv_encoder.model``; all other keys are kept as-is. The input
    mapping is not modified. Restored from a mangled version whose result dict
    was never actually populated.
    """
    from collections import OrderedDict  # local import: top of this chunk is not visible

    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def lowerCAmelCase_ ( state_dict , is_panoptic=False ) -> None:
    """Split each encoder self-attention ``in_proj`` matrix/bias into q/k/v entries, in place.

    PyTorch's MultiHeadAttention stores query/key/value as one stacked
    (3*256, 256) weight and (3*256,) bias; the HF model expects three separate
    projections, so each popped tensor is sliced into thirds of 256 rows
    (the model's hidden size). ``is_panoptic`` prepends the ``conditional_detr.``
    prefix used by the panoptic checkpoints to the *source* keys only.
    """
    prefix = ''''''
    if is_panoptic:
        prefix = '''conditional_detr.'''
    # first: transformer encoder (6 layers)
    for i in range(6 ):
        # read in weights + bias of input projection layer
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def lowerCAmelCase_ ( ):
    """Download and return the standard COCO verification image (two cats on a couch).

    Restored: the mangled version read an undefined name for the URL and passed
    the URL itself as ``stream=``; streaming must be enabled so ``.raw`` is a
    readable file object for ``Image.open``.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def lowerCAmelCase_ ( __UpperCAmelCase: Any , __UpperCAmelCase: Any ) -> Optional[int]:
    """Convert an original ConditionalDETR torch-hub checkpoint to the HF format and save it.

    NOTE(review): this block appears machine-mangled. The signature declares the
    same parameter name twice (invalid Python) and every assignment targets the
    throwaway name ``UpperCamelCase__`` while later statements read the original
    variable names (``model_name``, ``config``, ``state_dict``, ...), so the
    bindings are lost. Code is preserved byte-for-byte; comments describe the
    evident intent of each step.
    """
    # build a default config; presumably the first parameter is the model name — TODO confirm
    UpperCamelCase__ : Tuple = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        UpperCamelCase__ : Optional[Any] = '''resnet101'''
    if "dc5" in model_name:
        UpperCamelCase__ : Dict = True
    UpperCamelCase__ : Union[str, Any] = '''panoptic''' in model_name
    if is_panoptic:
        # panoptic checkpoints use 250 labels
        UpperCamelCase__ : Any = 250
    else:
        # detection checkpoints use the 91 COCO classes; id2label is fetched from the hub
        UpperCamelCase__ : int = 91
        UpperCamelCase__ : List[str] = '''huggingface/label-files'''
        UpperCamelCase__ : List[str] = '''coco-detection-id2label.json'''
        UpperCamelCase__ : Tuple = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
        UpperCamelCase__ : List[str] = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
        UpperCamelCase__ : Dict = idalabel
        UpperCamelCase__ : Dict = {v: k for k, v in idalabel.items()}
    # load image processor
    UpperCamelCase__ : int = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
    UpperCamelCase__ : str = ConditionalDetrImageProcessor(format=__UpperCAmelCase )
    # prepare image
    UpperCamelCase__ : Tuple = prepare_img()
    UpperCamelCase__ : List[str] = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' )
    UpperCamelCase__ : Union[str, Any] = encoding['''pixel_values''']
    logger.info(f"Converting model {model_name}..." )
    # load original model from torch hub
    UpperCamelCase__ : Union[str, Any] = torch.hub.load('''DeppMeng/ConditionalDETR''' , __UpperCAmelCase , pretrained=__UpperCAmelCase ).eval()
    UpperCamelCase__ : Optional[int] = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            # panoptic checkpoints nest everything under a "conditional_detr." prefix
            UpperCamelCase__ : List[str] = '''conditional_detr.''' + src
        rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
    UpperCamelCase__ : str = rename_backbone_keys(__UpperCAmelCase )
    # query, key and value matrices need special treatment
    read_in_q_k_v(__UpperCAmelCase , is_panoptic=__UpperCAmelCase )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    UpperCamelCase__ : List[str] = '''conditional_detr.model.''' if is_panoptic else '''model.'''
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('''conditional_detr''' )
                and not key.startswith('''class_labels_classifier''' )
                and not key.startswith('''bbox_predictor''' )
            ):
                # base-model weight of the panoptic checkpoint: re-key under the prefix
                UpperCamelCase__ : Tuple = state_dict.pop(__UpperCAmelCase )
                UpperCamelCase__ : List[Any] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                # detection-head weights keep their name
                UpperCamelCase__ : Any = state_dict.pop(__UpperCAmelCase )
                UpperCamelCase__ : Dict = val
            elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
                # segmentation-head weights are left untouched
                continue
            else:
                UpperCamelCase__ : List[Any] = state_dict.pop(__UpperCAmelCase )
                UpperCamelCase__ : Optional[int] = val
        else:
            if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
                UpperCamelCase__ : List[str] = state_dict.pop(__UpperCAmelCase )
                UpperCamelCase__ : Optional[int] = val
    # finally, create HuggingFace model and load state dict
    UpperCamelCase__ : str = ConditionalDetrForSegmentation(__UpperCAmelCase ) if is_panoptic else ConditionalDetrForObjectDetection(__UpperCAmelCase )
    model.load_state_dict(__UpperCAmelCase )
    model.eval()
    model.push_to_hub(repo_id=__UpperCAmelCase , organization='''DepuMeng''' , commit_message='''Add model''' )
    # verify our conversion
    UpperCamelCase__ : str = conditional_detr(__UpperCAmelCase )
    UpperCamelCase__ : Optional[Any] = model(__UpperCAmelCase )
    assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
    Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
    model.save_pretrained(__UpperCAmelCase )
    image_processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
    # CLI entry point for the conversion script. Restored: the mangled version
    # bound the parser and the parsed args to the same throwaway name and then
    # read the undefined names `parser`/`args`, and it called an entry point
    # (`convert_conditional_detr_checkpoint`) that does not exist in this file —
    # the converter here is the last definition named `lowerCAmelCase_`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='conditional_detr_resnet50',
        type=str,
        help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    args = parser.parse_args()
    lowerCAmelCase_(args.model_name, args.pytorch_dump_folder_path)
| 369 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration module.
# NOTE(review): the obfuscation bound the logger and the archive map to the SAME
# name, so the second assignment clobbers the first — upstream these are
# `logger` and `ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`; verify before use.
lowerCAmelCase = logging.get_logger(__name__)
# Map of canonical RoFormer checkpoints to their hosted config files.
lowerCAmelCase = {
    'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
    'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
    'junnyu/roformer_chinese_char_small': (
        'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
    ),
    'junnyu/roformer_chinese_char_base': (
        'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
    ),
    'junnyu/roformer_small_discriminator': (
        'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
    ),
    'junnyu/roformer_small_generator': (
        'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _a ( PretrainedConfig ):
    """RoFormer-style model configuration.

    Stores the hyper-parameters used to build the model. Restored from a
    mangled original whose base class was an undefined name (``PretrainedConfig``
    is imported at the top of this module), whose ``__init__`` repeated one
    parameter name for every argument, and whose attribute assignments all
    targeted a throwaway local instead of ``self``. Parameter names were
    recovered from the right-hand sides that survived the mangling.
    """

    # Canonical Transformers model-type identifier (was obfuscated to `_lowercase`).
    model_type = '''roformer'''

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-1_2,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        # Fall back to hidden_size when no standalone embedding size is given.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class _a ( OnnxConfig ):
    """ONNX export configuration: declares the model's dynamic input axes.

    Restored from a mangled original whose base class was an undefined name
    (``OnnxConfig`` is imported at the top of this module) and whose
    ``dynamic_axis`` local was never actually bound.
    """

    @property
    def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
        # NOTE(review): upstream this property is named `inputs` (required by the
        # OnnxConfig interface); the obfuscated name is kept to preserve this
        # file's interface — confirm before relying on ONNX export.
        if self.task == "multiple-choice":
            # multiple-choice adds a per-example "choice" axis
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ]
        )
| 43 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the ConvBERT sub-package. Restored: the mangled
# version rebound a single module-level name for every optional group and then
# referenced the never-defined `_import_structure`; the canonical Transformers
# pattern builds one dict and registers backend-specific entries on it.
_import_structure = {
    'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
    'tokenization_convbert': ['ConvBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # fast tokenizer only when the `tokenizers` backend is installed
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch model classes
    _import_structure['modeling_convbert'] = [
        'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvBertForMaskedLM',
        'ConvBertForMultipleChoice',
        'ConvBertForQuestionAnswering',
        'ConvBertForSequenceClassification',
        'ConvBertForTokenClassification',
        'ConvBertLayer',
        'ConvBertModel',
        'ConvBertPreTrainedModel',
        'load_tf_weights_in_convbert',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow model classes
    _import_structure['modeling_tf_convbert'] = [
        'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFConvBertForMaskedLM',
        'TFConvBertForMultipleChoice',
        'TFConvBertForQuestionAnswering',
        'TFConvBertForSequenceClassification',
        'TFConvBertForTokenClassification',
        'TFConvBertLayer',
        'TFConvBertModel',
        'TFConvBertPreTrainedModel',
    ]

if TYPE_CHECKING:
    # real imports for static type checkers only
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43 | 1 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 95 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# Import the slow tokenizer only when sentencepiece is installed; otherwise fall
# back to None so the fast tokenizer can still declare it.
# NOTE(review): all five module constants below were obfuscated to the SAME name
# `__A`, so each assignment clobbers the previous one — upstream these are
# `FNetTokenizer`, `logger`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`,
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` and `SPIECE_UNDERLINE`; verify.
if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    __A : Tuple = None
# Module-level logger.
__A : Optional[Any] = logging.get_logger(__name__)
# File names expected inside a saved tokenizer directory.
__A : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
# Hosted vocab/tokenizer files for the canonical FNet checkpoints.
__A : int = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}
# Maximum input sizes (in tokens) per checkpoint.
__A : Optional[int] = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}
# SentencePiece word-boundary marker.
__A : List[Any] = "▁"
class _a ( PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) FNet tokenizer.

    Restored from a mangled original: the base class was an undefined name
    (``PreTrainedTokenizerFast`` is imported at the top of this module), all
    five class attributes shared one obfuscated name (so only the last
    survived), ``__init__`` repeated a single parameter name, and the three
    methods all shared one name and therefore shadowed each other. Attribute,
    parameter and method names follow the PreTrainedTokenizerFast contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """token_type_ids"""]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # The slow tokenizer can only be re-saved when the sentencepiece model is available.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Add [CLS]/[SEP] around one sequence, or around a pair of sequences."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_a + sep

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None):
        """Return segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model into *save_directory* and return its path."""
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 95 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case ( PipelineTesterMixin , unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for ``LDMTextToImagePipeline``.

    Restored from a mangled original: the mixin base was an undefined name
    (``PipelineTesterMixin`` is imported above), all class attributes shared
    one obfuscated name, the three methods shared one name (shadowing each
    other), and local bindings were lost. Attribute/method names follow the
    diffusers PipelineTesterMixin contract; ``get_dummy_components`` /
    ``get_dummy_inputs`` are grounded by the surviving ``self.`` calls.
    """

    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # original boolean flag was obfuscated; upstream this is `test_cpu_offload = False`
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build tiny deterministic pipeline components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline on *device*."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
    """Slow GPU integration test for ``LDMTextToImagePipeline`` (few steps).

    Restored from a mangled original whose three methods all shared one name;
    ``tearDown`` is grounded by the surviving ``super().tearDown()`` call and
    ``get_inputs`` by the surviving ``self.get_inputs(...)`` call.
    """

    def tearDown(self):
        # free GPU memory between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        """Deterministic call kwargs with pre-sampled latents (1, 4, 32, 32)."""
        # NOTE(review): the dtype default was obfuscated (`floataa`); float32
        # matches the upstream test — confirm.
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
    """Nightly GPU integration test for ``LDMTextToImagePipeline`` (full 50-step run).

    Restored from a mangled original (shared method names, lost bindings); the
    final assertion line also had extraction junk fused onto it, removed here.
    """

    def tearDown(self):
        # free GPU memory between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        """Deterministic call kwargs with pre-sampled latents (1, 4, 32, 32)."""
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _snake_case ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for ``CycleDiffusionPipeline``.

    Restored from a mangled original: the two mixin bases were undefined names
    (both are imported above), class attributes and the two helper methods all
    shared one obfuscated name, and local bindings were lost. The delegating
    test names are grounded by the surviving ``super().test_...()`` calls.
    """

    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny deterministic pipeline components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs, including a seeded source image in [0, 1]."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
    """Slow GPU integration tests for ``CycleDiffusionPipeline``.

    Restored from a mangled original (shared method names, lost bindings); the
    final assertion line also had extraction junk fused onto it, removed here.
    ``tearDown`` is grounded by the surviving ``super().tearDown()`` call.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A black colored car"
        source_prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A black colored car"
        source_prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images
        assert np.abs(image - expected_image).max() < 2e-2
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
    """Download-behavior check for Flax diffusion pipelines."""

    def lowercase__ ( self ):
        """Downloading a Flax pipeline must not fetch any PyTorch weight files.

        Restored: the mangled version passed an undefined name for both
        ``safety_checker`` (intended: None) and ``cache_dir`` (intended: the
        temporary directory created here).
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                '''hf-internal-testing/tiny-stable-diffusion-pipe''', safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], '''snapshots''' ) )]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration tests for FlaxStableDiffusionPipeline.

    NOTE(review): local names are machine-mangled (everything is assigned to
    ``lowercase__`` and passed as ``lowerCamelCase``), so the data flow below
    cannot be executed as written; compare with the upstream diffusers tests.
    The 8-device asserts target a TPU v3-8 host.
    """

    def lowercase__ ( self : int ):
        """Smoke test: jit/pmap generation with the tiny test pipeline at 64x64."""
        lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
            '''hf-internal-testing/tiny-stable-diffusion-pipe''', safety_checker=lowerCamelCase )
        lowercase__ = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        lowercase__ = jax.random.PRNGKey(0 )
        lowercase__ = 4
        lowercase__ = jax.device_count()
        lowercase__ = num_samples * [prompt]
        lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
        # shard inputs and rng
        lowercase__ = replicate(lowerCamelCase )
        lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase )
        lowercase__ = shard(lowerCamelCase )
        lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1514745 ) < 1E-3
            assert np.abs(np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 49947.875 ) < 5E-1
        lowercase__ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
        assert len(lowerCamelCase ) == num_samples

    def lowercase__ ( self : Any ):
        """Full SD v1.4 (flax revision) generation at 512x512 with pixel-sum checks."""
        lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''', revision='''flax''', safety_checker=lowerCamelCase )
        lowercase__ = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        lowercase__ = jax.random.PRNGKey(0 )
        lowercase__ = 50
        lowercase__ = jax.device_count()
        lowercase__ = num_samples * [prompt]
        lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
        # shard inputs and rng
        lowercase__ = replicate(lowerCamelCase )
        lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase )
        lowercase__ = shard(lowerCamelCase )
        lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05652401) ) < 1E-3
            assert np.abs((np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 2383808.2) ) < 5E-1

    def lowercase__ ( self : int ):
        """Same generation using the bf16 revision and jnp.bfloat16 weights."""
        lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa, safety_checker=lowerCamelCase )
        lowercase__ = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        lowercase__ = jax.random.PRNGKey(0 )
        lowercase__ = 50
        lowercase__ = jax.device_count()
        lowercase__ = num_samples * [prompt]
        lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
        # shard inputs and rng
        lowercase__ = replicate(lowerCamelCase )
        lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase )
        lowercase__ = shard(lowerCamelCase )
        lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
            assert np.abs((np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1

    def lowercase__ ( self : Dict ):
        """bf16 generation with the default safety checker left in place."""
        lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa )
        lowercase__ = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        lowercase__ = jax.random.PRNGKey(0 )
        lowercase__ = 50
        lowercase__ = jax.device_count()
        lowercase__ = num_samples * [prompt]
        lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
        # shard inputs and rng
        lowercase__ = replicate(lowerCamelCase )
        lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase )
        lowercase__ = shard(lowerCamelCase )
        lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
            assert np.abs((np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1

    def lowercase__ ( self : Optional[int] ):
        """bf16 generation with an explicit FlaxDDIMScheduler swapped in."""
        lowercase__ = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''', set_alpha_to_one=lowerCamelCase, steps_offset=1, )
        lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa, scheduler=lowerCamelCase, safety_checker=lowerCamelCase, )
        lowercase__ = scheduler.create_state()
        lowercase__ = scheduler_state
        lowercase__ = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        lowercase__ = jax.random.PRNGKey(0 )
        lowercase__ = 50
        lowercase__ = jax.device_count()
        lowercase__ = num_samples * [prompt]
        lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
        # shard inputs and rng
        lowercase__ = replicate(lowerCamelCase )
        lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase )
        lowercase__ = shard(lowerCamelCase )
        lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045043945) ) < 1E-3
            assert np.abs((np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 2347693.5) ) < 5E-1

    def lowercase__ ( self : str ):
        """Memory-efficient attention must produce (near-)identical pixels."""
        lowercase__ = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        lowercase__ = jax.device_count()
        lowercase__ = num_samples * [prompt]
        lowercase__ = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase )
        lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa, safety_checker=lowerCamelCase, )
        lowercase__ = replicate(lowerCamelCase )
        lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
        lowercase__ = shard(lowerCamelCase )
        lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        lowercase__ = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa, safety_checker=lowerCamelCase, use_memory_efficient_attention=lowerCamelCase, )
        lowercase__ = replicate(lowerCamelCase )
        lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
        lowercase__ = shard(lowerCamelCase )
        lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        lowercase__ = images[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice ).max() < 1E-2
| 671 |
from functools import reduce

# The 1000-digit number from Project Euler problem 8
# (https://projecteuler.net/problem=8), split into 50-digit chunks.
# NOTE: plain `str` annotation -- the original `Union[str, Any]` referenced
# names that were never imported and raised NameError at module load.
A__ : str = (
    '73167176531330624919225119674426574742355349194934'
    '96983520312774506326239578318016984801869478851843'
    '85861560789112949495459501737958331952853208805511'
    '12540698747158523863050715693290963295227443043557'
    '66896648950445244523161731856403098711121722383113'
    '62229893423380308135336276614282806444486645238749'
    '30358907296290491560440772390713810515859307960866'
    '70172427121883998797908792274921901699720888093776'
    '65727333001053367881220235421809751254540594752243'
    '52584907711670556013604839586446706324415722155397'
    '53697817977846174064955149290862569321978468622482'
    '83972241375657056057490261407972968652414535100474'
    '82166370484403199890008895243450658541227588666881'
    '16427171479924442928230863465674813919123162824586'
    '17866458359124566529476545682848912883142607690042'
    '24219022671055626321111109370544217506941658960408'
    '07198403850962455444362981230987879927244284909188'
    '84580156166097919133875499200524063689912560717606'
    '05886116467109405077541002256983155200055935729725'
    '71636269561882670428252483600823257530420752963450'
)
N = A__  # short alias used as the default argument below


def a(n: str = N) -> int:
    """Return the greatest product of 13 adjacent digits in the digit string *n*.

    Fixes over the original block: the reduce ``lambda`` declared two
    parameters with the same name (a SyntaxError), and the default ``N`` and
    the body's ``n`` were undefined names.

    Args:
        n: a string of decimal digits, at least 13 characters long.

    Returns:
        The maximum product over every window of 13 consecutive digits.

    Raises:
        ValueError: if *n* has fewer than 13 digits (``max()`` of an empty
            generator).
    """
    return max(
        # mypy cannot properly interpret reduce, hence the str/int round-trip
        int(reduce(lambda acc, digit: str(int(acc) * int(digit)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    # The original guard called an undefined `solution()`; call `a()` instead.
    print(f"{a() = }")
| 671 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCAmelCase : int = logging.get_logger(__name__)

# Map of pretrained Swin checkpoints to their hosted config URLs.
# NOTE(review): this constant reuses the name of the logger above and so
# clobbers it -- the names look machine-mangled; verify against upstream.
_UpperCAmelCase : Optional[int] = {
    '''microsoft/swin-tiny-patch4-window7-224''': (
        '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class __magic_name__ ( BackboneConfigMixin , PretrainedConfig ):
    r"""
    Configuration for a Swin Transformer model/backbone.

    The original block did not compile: every ``__init__`` parameter was named
    ``snake_case_`` (duplicate parameter names are a SyntaxError) and the base
    classes were the undefined name ``__SCREAMING_SNAKE_CASE``.  Parameter
    names and bases are reconstructed from the attribute assignments in the
    body and the imports at the top of this file (``PretrainedConfig``,
    ``BackboneConfigMixin``, ``get_aligned_output_features_output_indices``).
    NOTE(review): confirm defaults against the upstream ``SwinConfig``.
    """

    # model_type identifier used by AutoConfig dispatch.
    model_type = 'swin'

    # Maps generic config attribute names onto Swin's own field names.
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=2_24,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['''stem'''] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
    """ONNX export configuration for Swin.

    NOTE(review): both properties below are named ``_A`` (machine-mangled),
    so the second definition shadows the first and only the tolerance
    property survives on the class -- verify the intended names upstream
    before use.
    """

    # Minimum torch version required for the ONNX export path.
    UpperCamelCase__ = version.parse('1.11' )

    @property
    def _A( self ):
        # Declares the single pixel_values input with fully dynamic axes.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def _A( self ):
        # Absolute tolerance used when validating exported model outputs.
        return 1E-4
| 72 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCamelCase (__lowerCamelCase ):
    """Processor pairing an auto image processor with an auto tokenizer
    (Donut-style, including XML-token-to-JSON parsing).

    NOTE(review): names are machine-mangled -- the three class attributes
    below all share one name (so only the last survives), every method is
    named ``A_`` (later defs shadow earlier ones), and locals are assigned
    to ``SCREAMING_SNAKE_CASE__`` but read through other names.  Verify
    against the upstream processor before relying on behaviour.
    """

    UpperCAmelCase_ = ["image_processor", "tokenizer"]
    UpperCAmelCase_ = "AutoImageProcessor"
    UpperCAmelCase_ = "AutoTokenizer"

    def __init__( self : Tuple, _UpperCAmelCase : str=None, _UpperCAmelCase : str=None, **_UpperCAmelCase : int ) -> Optional[int]:
        """Accepts image_processor/tokenizer; supports the deprecated
        ``feature_extractor`` kwarg as an alias for image_processor."""
        SCREAMING_SNAKE_CASE__ : List[Any] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", _UpperCAmelCase, )
            SCREAMING_SNAKE_CASE__ : str = kwargs.pop("feature_extractor" )
        SCREAMING_SNAKE_CASE__ : Tuple = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(_UpperCAmelCase, _UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : int = self.image_processor
        SCREAMING_SNAKE_CASE__ : Any = False

    def __call__( self : List[str], *_UpperCAmelCase : Any, **_UpperCAmelCase : Tuple ) -> Optional[Any]:
        """Routes `images` to the image processor and `text` to the tokenizer;
        when both are given, attaches token ids as labels on the image batch."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*_UpperCAmelCase, **_UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop("images", _UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("text", _UpperCAmelCase )
        if len(_UpperCAmelCase ) > 0:
            SCREAMING_SNAKE_CASE__ : Optional[int] = args[0]
            SCREAMING_SNAKE_CASE__ : str = args[1:]
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process." )
        if images is not None:
            SCREAMING_SNAKE_CASE__ : Dict = self.image_processor(_UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase )
        if text is not None:
            SCREAMING_SNAKE_CASE__ : str = self.tokenizer(_UpperCAmelCase, **_UpperCAmelCase )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            SCREAMING_SNAKE_CASE__ : Optional[int] = encodings["input_ids"]
            return inputs

    def A_ ( self : Dict, *_UpperCAmelCase : Tuple, **_UpperCAmelCase : Dict ) -> Union[str, Any]:
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase )

    def A_ ( self : List[str], *_UpperCAmelCase : int, **_UpperCAmelCase : Dict ) -> Any:
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase )

    @contextmanager
    def A_ ( self : Optional[Any] ) -> Tuple:
        """Deprecated: temporarily swaps the current processor to the tokenizer
        so label text can be processed, restoring the image processor after."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call." )
        SCREAMING_SNAKE_CASE__ : Any = True
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer
        yield
        SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = False

    def A_ ( self : Tuple, _UpperCAmelCase : List[Any], _UpperCAmelCase : int=False, _UpperCAmelCase : Optional[Any]=None ) -> Any:
        """Parse a flat `<s_key>...</s_key>` token sequence into nested
        dicts/lists (Donut's token-to-JSON conversion); recursive."""
        if added_vocab is None:
            SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer.get_added_vocab()
        SCREAMING_SNAKE_CASE__ : str = {}
        while tokens:
            # Find the next opening tag `<s_key>`.
            SCREAMING_SNAKE_CASE__ : Dict = re.search(r"<s_(.*?)>", _UpperCAmelCase, re.IGNORECASE )
            if start_token is None:
                break
            SCREAMING_SNAKE_CASE__ : Any = start_token.group(1 )
            SCREAMING_SNAKE_CASE__ : Dict = re.search(rF'''</s_{key}>''', _UpperCAmelCase, re.IGNORECASE )
            SCREAMING_SNAKE_CASE__ : Any = start_token.group()
            if end_token is None:
                # Unbalanced tag: drop it and continue.
                SCREAMING_SNAKE_CASE__ : List[str] = tokens.replace(_UpperCAmelCase, "" )
            else:
                SCREAMING_SNAKE_CASE__ : Optional[int] = end_token.group()
                SCREAMING_SNAKE_CASE__ : List[str] = re.escape(_UpperCAmelCase )
                SCREAMING_SNAKE_CASE__ : List[Any] = re.escape(_UpperCAmelCase )
                # Capture everything between the opening and closing tags.
                SCREAMING_SNAKE_CASE__ : Optional[Any] = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''', _UpperCAmelCase, re.IGNORECASE )
                if content is not None:
                    SCREAMING_SNAKE_CASE__ : Optional[int] = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        SCREAMING_SNAKE_CASE__ : str = self.tokenajson(_UpperCAmelCase, is_inner_value=_UpperCAmelCase, added_vocab=_UpperCAmelCase )
                        if value:
                            if len(_UpperCAmelCase ) == 1:
                                SCREAMING_SNAKE_CASE__ : str = value[0]
                            SCREAMING_SNAKE_CASE__ : List[str] = value
                    else:  # leaf nodes
                        SCREAMING_SNAKE_CASE__ : Optional[int] = []
                        for leaf in content.split(r"<sep/>" ):
                            SCREAMING_SNAKE_CASE__ : Tuple = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                SCREAMING_SNAKE_CASE__ : str = leaf[1:-2]  # for categorical special tokens
                            output[key].append(_UpperCAmelCase )
                        if len(output[key] ) == 1:
                            SCREAMING_SNAKE_CASE__ : str = output[key][0]
                SCREAMING_SNAKE_CASE__ : Optional[Any] = tokens[tokens.find(_UpperCAmelCase ) + len(_UpperCAmelCase ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=_UpperCAmelCase, added_vocab=_UpperCAmelCase )
        if len(_UpperCAmelCase ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def A_ ( self : str ) -> Optional[Any]:
        """Deprecated alias for image_processor_class."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", _UpperCAmelCase, )
        return self.image_processor_class

    @property
    def A_ ( self : int ) -> List[str]:
        """Deprecated alias for image_processor."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", _UpperCAmelCase, )
        return self.image_processor
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class lowerCAmelCase__ :
    """Builds DeBERTa-v2 configs and dummy inputs for the TF test suite.

    NOTE(review): names are machine-mangled -- ``__init__`` repeats the
    parameter name ``lowercase__`` (a SyntaxError as written), locals are
    assigned to ``__A`` but read through descriptive names, and methods
    pass the undefined name ``A_``.  Reconstruct from the upstream
    transformers test before running.
    """

    def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=2 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.02 , lowercase__=False , lowercase__=True , lowercase__="None" , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
        """Store the test hyperparameters on the tester instance."""
        __A =parent
        __A =batch_size
        __A =seq_length
        __A =is_training
        __A =use_input_mask
        __A =use_token_type_ids
        __A =use_labels
        __A =vocab_size
        __A =hidden_size
        __A =num_hidden_layers
        __A =num_attention_heads
        __A =intermediate_size
        __A =hidden_act
        __A =hidden_dropout_prob
        __A =attention_probs_dropout_prob
        __A =max_position_embeddings
        __A =type_vocab_size
        __A =type_sequence_label_size
        __A =initializer_range
        __A =num_labels
        __A =num_choices
        __A =relative_attention
        __A =position_biased_input
        __A =pos_att_type
        __A =scope

    def __UpperCamelCase ( self ):
        """Create random ids/masks/labels plus a DebertaVaConfig."""
        __A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __A =None
        if self.use_input_mask:
            __A =random_attention_mask([self.batch_size, self.seq_length] )
        __A =None
        if self.use_token_type_ids:
            __A =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __A =None
        __A =None
        __A =None
        if self.use_labels:
            __A =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __A =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        __A =DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=A_ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Check base-model output shape (batch, seq, hidden)."""
        __A =TFDebertaVaModel(config=A_ )
        __A ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __A =[input_ids, input_mask]
        __A =model(A_ )
        __A =model(A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Check masked-LM logits shape (batch, seq, vocab)."""
        __A =TFDebertaVaForMaskedLM(config=A_ )
        __A ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        __A =model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Check sequence-classification logits shape (batch, num_labels)."""
        __A =self.num_labels
        __A =TFDebertaVaForSequenceClassification(config=A_ )
        __A ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        __A =model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Check token-classification logits shape (batch, seq, num_labels)."""
        __A =self.num_labels
        __A =TFDebertaVaForTokenClassification(config=A_ )
        __A ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        __A =model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Check QA start/end logits shapes (batch, seq)."""
        __A =TFDebertaVaForQuestionAnswering(config=A_ )
        __A ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        __A =model(A_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __UpperCamelCase ( self ):
        """Split prepared inputs into (config, inputs_dict) for common tests."""
        __A =self.prepare_config_and_inputs()
        (
            (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) ,
        ) =config_and_inputs
        __A ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    """Common model/pipeline tests for TF DeBERTa-v2.

    NOTE(review): the class attributes below all share the name
    ``lowercase_`` (only the last survives) and the methods all share
    ``__UpperCamelCase`` and pass the undefined ``A_`` -- machine-mangled;
    compare with the upstream transformers test module.
    """

    lowercase_ = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    lowercase_ = (
        {
            '''feature-extraction''': TFDebertaVaModel,
            '''fill-mask''': TFDebertaVaForMaskedLM,
            '''question-answering''': TFDebertaVaForQuestionAnswering,
            '''text-classification''': TFDebertaVaForSequenceClassification,
            '''token-classification''': TFDebertaVaForTokenClassification,
            '''zero-shot''': TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    lowercase_ = False
    lowercase_ = False

    def __UpperCamelCase ( self ):
        """Build the model tester and a ConfigTester."""
        __A =TFDebertaVaModelTester(self )
        __A =ConfigTester(self , config_class=A_ , hidden_size=3_7 )

    def __UpperCamelCase ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def __UpperCamelCase ( self ):
        """Base model forward-shape test."""
        __A =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A_ )

    def __UpperCamelCase ( self ):
        """Masked-LM head test."""
        __A =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*A_ )

    def __UpperCamelCase ( self ):
        """Question-answering head test."""
        __A =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A_ )

    def __UpperCamelCase ( self ):
        """Sequence-classification head test."""
        __A =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A_ )

    def __UpperCamelCase ( self ):
        """Token-classification head test."""
        __A =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A_ )

    @slow
    def __UpperCamelCase ( self ):
        """Pretrained checkpoint loads successfully."""
        __A =TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
        self.assertIsNotNone(A_ )
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
    """Integration tests for TF DeBERTa-v2 against known output values."""

    @unittest.skip(reason='''Model not available yet''' )
    def __UpperCamelCase ( self ):
        """Placeholder until the checkpoint is published."""
        pass

    @slow
    def __UpperCamelCase ( self ):
        """Compare a 3x3 logits slice of deberta-v2-xlarge with stored values."""
        __A =TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
        __A =tf.constant([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        __A =tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        __A =model(A_ , attention_mask=A_ )[0]
        # Expected hidden-state slice recorded from a reference run.
        __A =tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , A_ , atol=1E-4 )
| 705 |
from ..utils import DummyObject, requires_backends
# Auto-generated dummy objects: each class raises a helpful error via
# requires_backends when torch is not installed.
# NOTE(review): every class shares the name ``lowerCAmelCase__`` (only the
# last binding survives at module level), the metaclass ``__magic_name__``
# is undefined in this file (upstream it is DummyObject), and the two
# ``__UpperCamelCase`` classmethods shadow each other -- machine-mangled.
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
    '''simple docstring'''
    lowercase_ = ["""torch"""]

    def __init__( self , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(self , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )


class lowerCAmelCase__ ( metaclass=__magic_name__ ):
    '''simple docstring'''
    lowercase_ = ["""torch"""]

    def __init__( self , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(self , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )


class lowerCAmelCase__ ( metaclass=__magic_name__ ):
    '''simple docstring'''
    lowercase_ = ["""torch"""]

    def __init__( self , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(self , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )


class lowerCAmelCase__ ( metaclass=__magic_name__ ):
    '''simple docstring'''
    lowercase_ = ["""torch"""]

    def __init__( self , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(self , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )


class lowerCAmelCase__ ( metaclass=__magic_name__ ):
    '''simple docstring'''
    lowercase_ = ["""torch"""]

    def __init__( self , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(self , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )


class lowerCAmelCase__ ( metaclass=__magic_name__ ):
    '''simple docstring'''
    lowercase_ = ["""torch"""]

    def __init__( self , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(self , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )


class lowerCAmelCase__ ( metaclass=__magic_name__ ):
    '''simple docstring'''
    lowercase_ = ["""torch"""]

    def __init__( self , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(self , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )


class lowerCAmelCase__ ( metaclass=__magic_name__ ):
    '''simple docstring'''
    lowercase_ = ["""torch"""]

    def __init__( self , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(self , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )


class lowerCAmelCase__ ( metaclass=__magic_name__ ):
    '''simple docstring'''
    lowercase_ = ["""torch"""]

    def __init__( self , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(self , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )


class lowerCAmelCase__ ( metaclass=__magic_name__ ):
    '''simple docstring'''
    lowercase_ = ["""torch"""]

    def __init__( self , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(self , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )


class lowerCAmelCase__ ( metaclass=__magic_name__ ):
    '''simple docstring'''
    lowercase_ = ["""torch"""]

    def __init__( self , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(self , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
        '''simple docstring'''
        requires_backends(cls , ['''torch'''] )
def A__ ( *__A : Optional[int] , **__A : Union[str, Any] ) ->Dict:
requires_backends(__A , ['''torch'''] )
def A__ ( *__A : str , **__A : int ) ->List[Any]:
requires_backends(__A , ['''torch'''] )
def A__ ( *__A : Dict , **__A : str ) ->Tuple:
requires_backends(__A , ['''torch'''] )
def A__ ( *__A : Optional[Any] , **__A : Dict ) ->Optional[Any]:
requires_backends(__A , ['''torch'''] )
def A__ ( *__A : str , **__A : str ) ->Any:
requires_backends(__A , ['''torch'''] )
def A__ ( *__A : List[Any] , **__A : Dict ) ->Union[str, Any]:
requires_backends(__A , ['''torch'''] )
def A__ ( *__A : Optional[int] , **__A : Optional[int] ) ->Any:
requires_backends(__A , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
| 516 | 0 |
'''simple docstring'''
from manim import *
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : str ) ->List[str]:
UpperCAmelCase_ = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase_ = Rectangle(height=0.25 , width=0.25 )
UpperCAmelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase_ = [mem.copy() for i in range(6 )]
UpperCAmelCase_ = [mem.copy() for i in range(6 )]
UpperCAmelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
UpperCAmelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
UpperCAmelCase_ = VGroup(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
UpperCAmelCase_ = Text('''CPU''' , font_size=24 )
UpperCAmelCase_ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase__ )
UpperCAmelCase_ = [mem.copy() for i in range(4 )]
UpperCAmelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
UpperCAmelCase_ = Text('''GPU''' , font_size=24 )
UpperCAmelCase_ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase__ )
UpperCAmelCase_ = [mem.copy() for i in range(6 )]
UpperCAmelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
UpperCAmelCase_ = Text('''Model''' , font_size=24 )
UpperCAmelCase_ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase__ )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for i, rect in enumerate(UpperCAmelCase__ ):
rect.set_stroke(UpperCAmelCase__ )
UpperCAmelCase_ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCAmelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=UpperCAmelCase__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=UpperCAmelCase__ , buff=0.0 )
self.add(UpperCAmelCase__ )
model_cpu_arr.append(UpperCAmelCase__ )
self.add(*UpperCAmelCase__ , *UpperCAmelCase__ , *UpperCAmelCase__ )
UpperCAmelCase_ = [mem.copy() for i in range(6 )]
UpperCAmelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
UpperCAmelCase_ = Text('''Loaded Checkpoint''' , font_size=24 )
UpperCAmelCase_ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(UpperCAmelCase__ )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for i, rect in enumerate(UpperCAmelCase__ ):
UpperCAmelCase_ = fill.copy().set_fill(UpperCAmelCase__ , opacity=0.7 )
target.move_to(UpperCAmelCase__ )
ckpt_arr.append(UpperCAmelCase__ )
UpperCAmelCase_ = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(UpperCAmelCase__ )
self.add(*UpperCAmelCase__ , *UpperCAmelCase__ )
UpperCAmelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase_ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(UpperCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase__ )
UpperCAmelCase_ = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
UpperCAmelCase_ = [meta_mem.copy() for i in range(6 )]
UpperCAmelCase_ = [meta_mem.copy() for i in range(6 )]
UpperCAmelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
UpperCAmelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
UpperCAmelCase_ = VGroup(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
UpperCAmelCase_ = Text('''Disk''' , font_size=24 )
UpperCAmelCase_ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(UpperCAmelCase__ , run_time=3 ) , Write(UpperCAmelCase__ , run_time=1 ) , Create(UpperCAmelCase__ , run_time=1 ) )
UpperCAmelCase_ = []
for i, rect in enumerate(UpperCAmelCase__ ):
UpperCAmelCase_ = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(UpperCAmelCase__ , run_time=1.5 ) )
self.play(*UpperCAmelCase__ )
self.play(FadeOut(UpperCAmelCase__ ) )
UpperCAmelCase_ = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase__ , run_time=3 ) )
self.play(
FadeOut(UpperCAmelCase__ , UpperCAmelCase__ , *UpperCAmelCase__ , *UpperCAmelCase__ ) , )
self.wait()
| 390 | '''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ : Any = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : int = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
lowercase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 390 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE = {
'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 706 |
__SCREAMING_SNAKE_CASE = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__SCREAMING_SNAKE_CASE = [{'type': 'code', 'content': INSTALL_CONTENT}]
__SCREAMING_SNAKE_CASE = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 153 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __a ( self ) -> Any:
for model_name in ["bert-base-uncased"]:
a : Optional[int] = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
a : Dict = TFAutoModel.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
a : Optional[int] = AutoModel.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
@slow
def __a ( self ) -> Tuple:
for model_name in ["bert-base-uncased"]:
a : List[str] = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
a : Optional[int] = TFAutoModelForPreTraining.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
a : Dict = AutoModelForPreTraining.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
@slow
def __a ( self ) -> str:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : List[str] = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
a : Any = TFAutoModelForCausalLM.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
a : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(
_lowerCamelCase , output_loading_info=_lowerCamelCase , from_pt=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
a : List[str] = AutoModelForCausalLM.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
a : List[Any] = AutoModelForCausalLM.from_pretrained(
_lowerCamelCase , output_loading_info=_lowerCamelCase , from_tf=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
    @slow
    def __a ( self ) -> Any:
        # Round-trip a generic LM-head model (BERT checkpoint) TF <-> PyTorch.
        # NOTE(review): `_lowerCamelCase` is undefined (machine-transformed code);
        # the calls below raise NameError as written.
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a : Optional[Any] = AutoConfig.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            a : Any = TFAutoModelWithLMHead.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            a : Any = AutoModelWithLMHead.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
    @slow
    def __a ( self ) -> Optional[Any]:
        # Round-trip a masked-LM head (BERT checkpoint) TF <-> PyTorch, including
        # the `output_loading_info=True` variant.
        # NOTE(review): `_lowerCamelCase` is undefined (machine-transformed code);
        # the calls below raise NameError as written.
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a : List[str] = AutoConfig.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            a : int = TFAutoModelForMaskedLM.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
            a : List[Any] = TFAutoModelForMaskedLM.from_pretrained(
                _lowerCamelCase , output_loading_info=_lowerCamelCase , from_pt=_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            a : Optional[Any] = AutoModelForMaskedLM.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
            a : str = AutoModelForMaskedLM.from_pretrained(
                _lowerCamelCase , output_loading_info=_lowerCamelCase , from_tf=_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
    @slow
    def __a ( self ) -> List[str]:
        # Round-trip a seq2seq LM head (T5 checkpoint) TF <-> PyTorch, including
        # the `output_loading_info=True` variant.
        # NOTE(review): `_lowerCamelCase` is undefined (machine-transformed code);
        # the calls below raise NameError as written.
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a : Union[str, Any] = AutoConfig.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            a : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
            a : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
                _lowerCamelCase , output_loading_info=_lowerCamelCase , from_pt=_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            a : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
            a : int = AutoModelForSeqaSeqLM.from_pretrained(
                _lowerCamelCase , output_loading_info=_lowerCamelCase , from_tf=_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
    @slow
    def __a ( self ) -> Dict:
        # Round-trip a sequence-classification head TF <-> PyTorch.
        # NOTE(review): `_lowerCamelCase` is undefined (machine-transformed code);
        # the calls below raise NameError as written.
        for model_name in ["bert-base-uncased"]:
            a : Optional[Any] = AutoConfig.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            a : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            a : List[str] = AutoModelForSequenceClassification.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
    @slow
    def __a ( self ) -> Dict:
        # Round-trip a question-answering head TF <-> PyTorch.
        # NOTE(review): `_lowerCamelCase` is undefined (machine-transformed code);
        # the calls below raise NameError as written.
        for model_name in ["bert-base-uncased"]:
            a : Tuple = AutoConfig.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            a : Any = TFAutoModelForQuestionAnswering.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            a : List[Any] = AutoModelForQuestionAnswering.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
    def __a ( self ) -> List[Any]:
        # Check that the parameter count (14410) survives a TF <-> PT round trip.
        # NOTE(review): `_lowerCamelCase` and `model` are undefined here
        # (machine-transformed code); the original loaded a tiny checkpoint and
        # bound it to `model`. NameError as written.
        a : str = TFAutoModelWithLMHead.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
        self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
        self.assertEqual(model.num_parameters() , 1_4410 )
        self.assertEqual(model.num_parameters(only_trainable=_lowerCamelCase ) , 1_4410 )
        a : int = AutoModelWithLMHead.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
        self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
        self.assertEqual(model.num_parameters() , 1_4410 )
        self.assertEqual(model.num_parameters(only_trainable=_lowerCamelCase ) , 1_4410 )
    def __a ( self ) -> List[str]:
        # Same parameter-count round-trip as the method above (this appears to be
        # a near-duplicate; the original file tested two different tiny
        # checkpoints here).
        # NOTE(review): `_lowerCamelCase` and `model` are undefined
        # (machine-transformed code); NameError as written.
        a : List[str] = TFAutoModelWithLMHead.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
        self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
        self.assertEqual(model.num_parameters() , 1_4410 )
        self.assertEqual(model.num_parameters(only_trainable=_lowerCamelCase ) , 1_4410 )
        a : List[Any] = AutoModelWithLMHead.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
        self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
        self.assertEqual(model.num_parameters() , 1_4410 )
        self.assertEqual(model.num_parameters(only_trainable=_lowerCamelCase ) , 1_4410 )
| 633 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_A = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 182 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class a:
    """A small dense matrix with +, -, scalar/matrix multiplication, transpose
    and the Sherman-Morrison rank-one inverse update.

    Fixes over the previous (machine-mangled) revision:
    * ``__init__`` now actually stores ``row`` / ``column`` / ``array`` on the
      instance instead of discarding them in throwaway locals.
    * Results and intermediates are bound to the names the surrounding code
      reads (``s``, ``max_element_length``, ``result[r, c] = ...``).
    * ``validate_indicies``, ``transpose`` and ``sherman_morrison`` get their
      real names back -- they had all been renamed to ``__magic_name__``, so
      later definitions shadowed earlier ones and internal calls such as
      ``self.validate_indicies`` / ``v.transpose()`` failed.
    """

    def __init__(self, row: int, column: int, default_value: float = 0):
        """Create a ``row`` x ``column`` matrix filled with ``default_value``."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self):
        """Render the matrix with right-aligned, equal-width columns."""
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Widest element (as text) decides the column width.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc) -> bool:
        """Return True iff ``loc`` is a valid (row, column) pair for this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        if not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        """Element-wise sum; shapes must match."""
        assert isinstance(another, a)
        assert self.row == another.row and self.column == another.column
        result = a(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = a(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        """Scalar multiplication for int/float, matrix product for matrices."""
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = a(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, a):  # Matrix multiplication
            assert self.column == another.row
            result = a(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            raise TypeError(f"Unsupported type given for another ({type(another)})")

    def transpose(self):
        """Return a new matrix with rows and columns swapped."""
        result = a(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Sherman-Morrison update: given ``self`` = A^-1 and column vectors
        ``u``, ``v``, return (A + u v^T)^-1, or None when not invertible."""
        assert isinstance(u, a) and isinstance(v, a)
        assert self.row == self.column == u.row == v.row  # u, v: column vectors
        assert u.column == v.column == 1  # u, v should be column vectors

        # (A + uv^T)^-1 = A^-1 - (A^-1 u)(v^T A^-1) / (1 + v^T A^-1 u)
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# The test harness below refers to this class as ``Matrix``.
Matrix = a
# Testing
if __name__ == "__main__":
    # Fixes over the previous revision: both helpers were named `_a` (the
    # second shadowed the first), the matrices were never actually populated
    # (assignments went to throwaway locals), and the final call referenced an
    # undefined name `testa`.

    def test1() -> None:
        """Demonstrate the Sherman-Morrison update on a 3x3 identity matrix."""
        # a^(-1): the inverse of the identity is the identity itself.
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v: column vectors for the rank-one update.
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        """Run the module's doctests."""
        import doctest

        doctest.testmod()

    test2()
| 502 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowerCAmelCase__ : List[Any] = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def _a ( __lowerCAmelCase : Any=None ):
    """Build (or attach) the argparse parser for ``accelerate tpu-config``.

    NOTE(review): this body appears machine-transformed and is not runnable as
    written -- the parameter is named ``__lowerCAmelCase`` while the body reads
    ``subparsers``; every parser/group is bound to a throwaway ``snake_case__``
    local while later lines read ``parser`` / ``config_args`` / ``pod_args``;
    and ``type=__lowerCAmelCase`` / ``default=__lowerCAmelCase`` /
    ``func=__lowerCAmelCase`` look like placeholders (upstream accelerate uses
    ``type=str`` / ``default=None`` and ``func=tpu_command_launcher``).
    Verify against the upstream ``accelerate`` CLI source before fixing.
    """
    if subparsers is not None:
        snake_case__ : Tuple = subparsers.add_parser('''tpu-config''' , description=_description )
    else:
        snake_case__ : str = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
    # Core arguments
    snake_case__ : Any = parser.add_argument_group(
        '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
    config_args.add_argument(
        '''--config_file''' , type=__lowerCAmelCase , default=__lowerCAmelCase , help='''Path to the config file to use for accelerate.''' , )
    config_args.add_argument(
        '''--tpu_name''' , default=__lowerCAmelCase , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
    config_args.add_argument(
        '''--tpu_zone''' , default=__lowerCAmelCase , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    snake_case__ : Optional[Any] = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
    pod_args.add_argument(
        '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
    pod_args.add_argument(
        '''--command_file''' , default=__lowerCAmelCase , help='''The path to the file containing the commands to run on the pod on startup.''' , )
    pod_args.add_argument(
        '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
    pod_args.add_argument(
        '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
    pod_args.add_argument(
        '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
    pod_args.add_argument(
        '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
    if subparsers is not None:
        parser.set_defaults(func=__lowerCAmelCase )
    return parser
def _a ( __lowerCAmelCase : Optional[Any] ):
    """Run the configured startup commands on every worker of a TPU pod via
    ``gcloud compute tpus tpu-vm ssh``.

    NOTE(review): machine-transformed and not runnable as written -- the
    parameter is ``__lowerCAmelCase`` while the body reads ``args``, and most
    results are bound to throwaway ``snake_case__`` locals while later lines
    read the intended names (``defaults``, ``new_cmd``, ``cmd`` ...).  Also
    note this function shares the name ``_a`` with its siblings, so earlier
    definitions are shadowed.  Verify against upstream ``accelerate``.
    """
    snake_case__ : Optional[int] = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(__lowerCAmelCase ):
        snake_case__ : Tuple = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            snake_case__ : Optional[int] = defaults.command_file
        if not args.command and defaults.commands is not None:
            snake_case__ : Union[str, Any] = defaults.commands
        if not args.tpu_name:
            snake_case__ : Union[str, Any] = defaults.tpu_name
        if not args.tpu_zone:
            snake_case__ : Optional[Any] = defaults.tpu_zone
    # Resolve which accelerate build to install on the pod.
    if args.accelerate_version == "dev":
        snake_case__ : Tuple = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        snake_case__ : Union[str, Any] = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , __lowerCAmelCase ):
        snake_case__ : Any = F"""accelerate=={args.accelerate_version}"""
    if not args.command_file and not args.command:
        raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
    if args.command_file:
        with open(args.command_file , '''r''' ) as f:
            snake_case__ : str = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , __lowerCAmelCase ):
        snake_case__ : List[Any] = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    snake_case__ : Optional[int] = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [F"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    snake_case__ : Tuple = '''; '''.join(__lowerCAmelCase )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    snake_case__ : str = ['''gcloud''']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(F"""Running {' '.join(__lowerCAmelCase )}""" )
        return
    subprocess.run(__lowerCAmelCase )
    print('''Successfully setup pod.''' )
def _a ( ):
    """CLI entry point: build the tpu-config parser, parse argv, and launch.

    NOTE(review): all three functions in this section are named ``_a`` (so the
    earlier definitions are shadowed), yet this body calls
    ``tpu_command_parser`` / ``tpu_command_launcher``, names that are never
    defined in this module -- NameError as written (machine-transformed code).
    The parse result is also bound to a throwaway local while ``parser`` is
    read on the next line.
    """
    snake_case__ : Dict = tpu_command_parser()
    snake_case__ : Dict = parser.parse_args()
    tpu_command_launcher(__lowerCAmelCase )
| 502 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a_ = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
    """Arguments for which model/config/tokenizer to fine-tune from.

    NOTE(review): machine-transformed -- ``default=snake_case`` references a
    name that is never defined in this module (the original defaults were
    ``None`` / ``False``), so creating this dataclass raises NameError.  The
    sibling dataclass below reuses this same class name, shadowing it.
    """

    # Required: path or hub id of the pretrained model.
    lowerCAmelCase__ : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    lowerCAmelCase__ : Optional[str] = field(
        default=snake_case , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    # Which TokenClassificationTask subclass to use (NER, POS, ...).
    lowerCAmelCase__ : Optional[str] = field(
        default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} )
    lowerCAmelCase__ : Optional[str] = field(
        default=snake_case , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    lowerCAmelCase__ : bool = field(default=snake_case , metadata={'help': 'Set this flag to use fast tokenization.'} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    lowerCAmelCase__ : Optional[str] = field(
        default=snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class UpperCAmelCase__ :
    """Arguments for the input data used in training and evaluation.

    NOTE(review): machine-transformed -- ``default=snake_case`` references a
    never-defined name (original defaults were ``None`` / ``False``), and this
    class name duplicates the model-arguments dataclass above, shadowing it;
    the driver below expects ``ModelArguments`` / ``DataTrainingArguments``.
    """

    # Required: directory with CoNLL-2003-formatted .txt files.
    lowerCAmelCase__ : str = field(
        metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
    lowerCAmelCase__ : Optional[str] = field(
        default=snake_case , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
    # Tokenized sequences are truncated/padded to this length.
    lowerCAmelCase__ : int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    lowerCAmelCase__ : bool = field(
        default=snake_case , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def __lowerCAmelCase ( ) -> Any:
    """End-to-end NER fine-tuning driver: parse args, build model/tokenizer/
    datasets, then train / evaluate / predict as requested.

    NOTE(review): machine-transformed code, not runnable as written.  Results
    are bound to throwaway ``__UpperCAmelCase`` locals while later lines read
    the intended names (``parser``, ``model_args``, ``trainer`` ...); many call
    sites pass a never-defined placeholder ``A_``; and the nested
    ``align_predictions`` declares the parameter name ``A_`` twice, which is a
    SyntaxError.  The structure mirrors the original transformers example
    ``examples/legacy/token-classification/run_ner.py``.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    __UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses()
    # Refuse to clobber a non-empty output directory unless explicitly allowed.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            " --overwrite_output_dir to overcome." )
    # Resolve the task class (NER, POS, ...) from the local `tasks` module.
    __UpperCAmelCase = import_module("tasks" )
    try:
        __UpperCAmelCase = getattr(A_ , model_args.task_type )
        __UpperCAmelCase = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            F'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
            F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s" , A_ )
    # Set seed
    set_seed(training_args.seed )
    # Prepare CONLL-2003 task
    __UpperCAmelCase = token_classification_task.get_labels(data_args.labels )
    __UpperCAmelCase = dict(enumerate(A_ ) )
    __UpperCAmelCase = len(A_ )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    __UpperCAmelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A_ , idalabel=A_ , labelaid={label: i for i, label in enumerate(A_ )} , cache_dir=model_args.cache_dir , )
    __UpperCAmelCase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    __UpperCAmelCase = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=A_ , cache_dir=model_args.cache_dir , )
    # Get datasets
    __UpperCAmelCase = (
        TokenClassificationDataset(
            token_classification_task=A_ , data_dir=data_args.data_dir , tokenizer=A_ , labels=A_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    __UpperCAmelCase = (
        TokenClassificationDataset(
            token_classification_task=A_ , data_dir=data_args.data_dir , tokenizer=A_ , labels=A_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    # Convert logits + label ids into per-sentence label-string lists,
    # skipping positions the loss ignores.
    # NOTE(review): duplicate parameter name ``A_`` -- SyntaxError as written.
    def align_predictions(A_ : np.ndarray , A_ : np.ndarray ) -> Tuple[List[int], List[int]]:
        __UpperCAmelCase = np.argmax(A_ , axis=2 )
        __UpperCAmelCase , __UpperCAmelCase = preds.shape
        __UpperCAmelCase = [[] for _ in range(A_ )]
        __UpperCAmelCase = [[] for _ in range(A_ )]
        for i in range(A_ ):
            for j in range(A_ ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list

    # seqeval metrics over the aligned predictions.
    def compute_metrics(A_ : EvalPrediction ) -> Dict:
        __UpperCAmelCase , __UpperCAmelCase = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(A_ , A_ ),
            "precision": precision_score(A_ , A_ ),
            "recall": recall_score(A_ , A_ ),
            "f1": fa_score(A_ , A_ ),
        }

    # Data collator
    __UpperCAmelCase = DataCollatorWithPadding(A_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    __UpperCAmelCase = Trainer(
        model=A_ , args=A_ , train_dataset=A_ , eval_dataset=A_ , compute_metrics=A_ , data_collator=A_ , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    __UpperCAmelCase = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        __UpperCAmelCase = trainer.evaluate()
        __UpperCAmelCase = os.path.join(training_args.output_dir , "eval_results.txt" )
        if trainer.is_world_process_zero():
            with open(A_ , "w" ) as writer:
                logger.info("***** Eval results *****" )
                for key, value in result.items():
                    logger.info(" %s = %s" , A_ , A_ )
                    writer.write("%s = %s\n" % (key, value) )
            results.update(A_ )
    # Predict
    if training_args.do_predict:
        __UpperCAmelCase = TokenClassificationDataset(
            token_classification_task=A_ , data_dir=data_args.data_dir , tokenizer=A_ , labels=A_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = trainer.predict(A_ )
        __UpperCAmelCase , __UpperCAmelCase = align_predictions(A_ , A_ )
        __UpperCAmelCase = os.path.join(training_args.output_dir , "test_results.txt" )
        if trainer.is_world_process_zero():
            with open(A_ , "w" ) as writer:
                for key, value in metrics.items():
                    logger.info(" %s = %s" , A_ , A_ )
                    writer.write("%s = %s\n" % (key, value) )
        # Save predictions
        __UpperCAmelCase = os.path.join(training_args.output_dir , "test_predictions.txt" )
        if trainer.is_world_process_zero():
            with open(A_ , "w" ) as writer:
                with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
                    token_classification_task.write_predictions_to_file(A_ , A_ , A_ )
    return results
def __lowerCAmelCase ( A_ : List[Any] ) -> List[Any]:
    """xla_spawn (TPU) per-process entry point.

    NOTE(review): this redefines ``__lowerCAmelCase`` (shadowing the main
    driver above) and then calls ``main()``, a name that does not exist in this
    module -- the original defined separate ``main`` and ``_mp_fn`` functions.
    """
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 221 | import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:
    class UpperCAmelCase__ :
        """Placeholder used when the vision extra (PIL) is not installed.

        NOTE(review): machine-transformed -- upstream names this stand-in
        ``Image`` with a static ``open``; as renamed here, ``Image`` stays
        undefined on the no-vision path, so later references still fail.
        """

        @staticmethod
        def _UpperCAmelCase ( *__lowerCAmelCase: Optional[int] , **__lowerCAmelCase: Dict ) -> Optional[int]:
            """No-op stand-in for ``Image.open`` and friends."""
            pass
def __lowerCAmelCase ( A_ : Image ) -> str:
__UpperCAmelCase = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
    """Tests for the ``depth-estimation`` pipeline.

    NOTE(review): machine-transformed code.  Several methods bind results to a
    throwaway ``__UpperCAmelCase`` local and then read the intended name
    (``depth_estimator``, ``outputs``), and the first method declares the
    parameter name ``__lowerCAmelCase`` three times -- a SyntaxError as
    written.
    """

    # Restrict auto-discovered architectures to depth-estimation models.
    lowerCAmelCase__ : List[str] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def _UpperCAmelCase ( self: List[Any] , __lowerCAmelCase: Dict , __lowerCAmelCase: List[str] , __lowerCAmelCase: List[Any] ) -> Tuple:
        """Build the pipeline under test and two sample image paths."""
        __UpperCAmelCase = DepthEstimationPipeline(model=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def _UpperCAmelCase ( self: Optional[int] , __lowerCAmelCase: Optional[int] , __lowerCAmelCase: List[Any] ) -> Union[str, Any]:
        """Run single and batched inputs (path, URL, RGBA/LA/L images) and
        check each output has the expected ``predicted_depth``/``depth`` keys."""
        __UpperCAmelCase = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __lowerCAmelCase )
        import datasets

        __UpperCAmelCase = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
        __UpperCAmelCase = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ] )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
            ] , __lowerCAmelCase , )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF" )
    def _UpperCAmelCase ( self: Union[str, Any] ) -> List[str]:
        """TF backend is intentionally unsupported for this pipeline."""
        pass

    @slow
    @require_torch
    def _UpperCAmelCase ( self: Optional[Any] ) -> Optional[int]:
        """End-to-end check against the Intel/dpt-large checkpoint."""
        __UpperCAmelCase = "Intel/dpt-large"
        __UpperCAmelCase = pipeline("depth-estimation" , model=__lowerCAmelCase )
        __UpperCAmelCase = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
        __UpperCAmelCase = hashimage(outputs["depth"] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )

    @require_torch
    def _UpperCAmelCase ( self: Union[str, Any] ) -> Tuple:
        """Skip: no tiny test checkpoint exists for GLPN or DPT."""
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 221 | 1 |
import os
def SCREAMING_SNAKE_CASE__ (grid=None) -> int:
    """Project Euler 11: greatest product of four adjacent numbers in the same
    direction (right, down, or either diagonal) in a square grid.

    Args:
        grid: optional square list-of-lists of ints.  When omitted, the
            classic 20x20 puzzle grid is read from ``grid.txt`` next to this
            file.  (Fixes the previous revision, which referenced an undefined
            name ``lowercase`` both for the file path and the int conversion,
            and generalizes the hard-coded 20/17 loop bounds to any size.)

    Returns:
        The maximum four-in-a-row product found.
    """
    if grid is None:
        # Load the default 20x20 puzzle grid shipped alongside this script.
        with open(os.path.dirname(__file__) + "/grid.txt") as f:
            grid = [[int(x) for x in f.readline().split()] for _ in range(20)]

    n = len(grid)
    maximum = 0
    # right
    for i in range(n):
        for j in range(n - 3):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            maximum = max(maximum, temp)
    # down
    for i in range(n - 3):
        for j in range(n):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            maximum = max(maximum, temp)
    # diagonal (down-right)
    for i in range(n - 3):
        for j in range(n - 3):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            maximum = max(maximum, temp)
    # diagonal (down-left)
    for i in range(n - 3):
        for j in range(3, n):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            maximum = max(maximum, temp)
    return maximum


# The __main__ guard below calls this function as ``solution``.
solution = SCREAMING_SNAKE_CASE__
if __name__ == "__main__":
    # Print the puzzle answer when run as a script.
    # NOTE(review): `solution` is not defined in this chunk as written -- the
    # function above is named SCREAMING_SNAKE_CASE__; verify the intended
    # public name (upstream calls it `solution`).
    print(solution())
| 705 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
# NOTE(review): both assignments below share the name ``lowerCamelCase`` --
# the archive-map dict clobbers the logger (machine-transformed code; the
# originals were ``logger`` and ``GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP``).
lowerCamelCase : Any = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config.json.
lowerCamelCase : Optional[int] = {
    'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class __lowercase (PretrainedConfig ):
    """Configuration for the GPT-NeoX-Japanese model.

    Stores hyper-parameters such as vocabulary size, hidden size, number of
    layers/heads, rotary-embedding settings and dropout rates.  Defaults match
    the ``abeja/gpt-neox-japanese-2.7b`` checkpoint.

    Fixes over the previous revision: the base class was the undefined name
    ``UpperCamelCase__`` (must be ``PretrainedConfig``, imported above, so
    ``from_pretrained`` and serialization work), and ``__init__`` declared
    every parameter as ``A`` -- duplicate parameter names are a SyntaxError.
    Parameter names/defaults follow the upstream GPTNeoXJapaneseConfig, whose
    default order matches the old signature exactly.
    """

    # HF model-type registry key; the mangled `_snake_case` alias is kept for
    # compatibility with the previous revision.
    model_type = """gpt_neox_japanese"""
    _snake_case = """gpt_neox_japanese"""

    def __init__(
        self,
        vocab_size=3_2_0_0_0,
        hidden_size=2_5_6_0,
        num_hidden_layers=3_2,
        num_attention_heads=3_2,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=1_0_0_0_0,
        max_position_embeddings=2_0_4_8,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=3_1_9_9_6,
        eos_token_id=3_1_9_9_9,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ) -> str:
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # Intermediate size is expressed as a multiple of hidden_size.
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        # Rotary position-embedding settings.
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 684 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase__ ( _UpperCamelCase ):
    """Test suite for ``DEISMultistepScheduler``.

    NOTE(review): this block shows signs of mechanical identifier renaming:
    * both class attributes are bound to the same name ``__lowerCamelCase``
      (the second assignment shadows the first) and every method is named
      ``lowerCamelCase_`` (only the last definition survives on the class);
    * local results are bound to ``lowerCamelCase__`` while later lines read
      names such as ``config``, ``kwargs``, ``sample``, ``residual``,
      ``scheduler``, ``output`` that are never assigned here (NameErrors);
    * two signatures repeat the parameter name ``__a`` (a SyntaxError).
    Documented as-is — confirm behavior against the upstream diffusers
    scheduler tests before relying on any claim below.
    """

    __lowerCamelCase = (DEISMultistepScheduler,)  # presumably scheduler_classes — TODO confirm
    __lowerCamelCase = (("num_inference_steps", 25),)  # presumably forward_default_kwargs; shadows the previous line

    def lowerCamelCase_ ( self : Optional[Any] , **__a : str ):
        """Return the default scheduler config dict, updated with ``**__a`` overrides."""
        lowerCamelCase__: Union[str, Any] = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0_001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """solver_order""": 2,
        }
        # NOTE(review): the dict above is bound to ``lowerCamelCase__`` but the
        # next two lines read ``config`` — undefined here.
        config.update(**__a )
        return config

    # NOTE(review): duplicate parameter name ``__a`` below — SyntaxError.
    def lowerCamelCase_ ( self : Optional[Any] , __a : Any=0 , **__a : Any ):
        """Round-trip the scheduler through save_config/from_pretrained and require identical step outputs."""
        lowerCamelCase__: int = dict(self.forward_default_kwargs )
        lowerCamelCase__: Any = kwargs.pop("""num_inference_steps""" , __a )
        lowerCamelCase__: Any = self.dummy_sample
        lowerCamelCase__: Any = 0.1 * sample
        lowerCamelCase__: Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            lowerCamelCase__: Optional[int] = self.get_scheduler_config(**__a )
            lowerCamelCase__: List[str] = scheduler_class(**__a )
            scheduler.set_timesteps(__a )
            # copy over dummy past residuals
            lowerCamelCase__: Tuple = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__a )
                lowerCamelCase__: str = scheduler_class.from_pretrained(__a )
                new_scheduler.set_timesteps(__a )
                # copy over dummy past residuals
                lowerCamelCase__: Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
                lowerCamelCase__ , lowerCamelCase__: Any = sample, sample
                for t in range(__a , time_step + scheduler.config.solver_order + 1 ):
                    lowerCamelCase__: List[str] = scheduler.step(__a , __a , __a , **__a ).prev_sample
                    lowerCamelCase__: Optional[Any] = new_scheduler.step(__a , __a , __a , **__a ).prev_sample
                    assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def lowerCamelCase_ ( self : List[Any] ):
        """Intentionally a no-op (overrides a base-class hook)."""
        pass

    # NOTE(review): duplicate parameter name ``__a`` below — SyntaxError.
    def lowerCamelCase_ ( self : List[Any] , __a : List[str]=0 , **__a : Optional[int] ):
        """Same save/load round-trip as above, but residuals are copied after set_timesteps."""
        lowerCamelCase__: List[str] = dict(self.forward_default_kwargs )
        lowerCamelCase__: List[Any] = kwargs.pop("""num_inference_steps""" , __a )
        lowerCamelCase__: Tuple = self.dummy_sample
        lowerCamelCase__: Optional[Any] = 0.1 * sample
        lowerCamelCase__: Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            lowerCamelCase__: Optional[Any] = self.get_scheduler_config()
            lowerCamelCase__: Optional[int] = scheduler_class(**__a )
            scheduler.set_timesteps(__a )
            # copy over dummy past residuals (must be after setting timesteps)
            lowerCamelCase__: List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__a )
                lowerCamelCase__: int = scheduler_class.from_pretrained(__a )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(__a )
                # copy over dummy past residual (must be after setting timesteps)
                lowerCamelCase__: List[str] = dummy_past_residuals[: new_scheduler.config.solver_order]
                lowerCamelCase__: Tuple = scheduler.step(__a , __a , __a , **__a ).prev_sample
                lowerCamelCase__: Optional[Any] = new_scheduler.step(__a , __a , __a , **__a ).prev_sample
                assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def lowerCamelCase_ ( self : str , __a : str=None , **__a : List[Any] ):
        """Run a full 10-step denoising loop and return the final sample."""
        # NOTE(review): ``scheduler`` is read before any assignment — the
        # parameter was presumably named ``scheduler`` before renaming.
        if scheduler is None:
            lowerCamelCase__: Optional[Any] = self.scheduler_classes[0]
            lowerCamelCase__: Union[str, Any] = self.get_scheduler_config(**__a )
            lowerCamelCase__: Union[str, Any] = scheduler_class(**__a )
        lowerCamelCase__: Optional[Any] = self.scheduler_classes[0]
        lowerCamelCase__: int = self.get_scheduler_config(**__a )
        lowerCamelCase__: Optional[int] = scheduler_class(**__a )
        lowerCamelCase__: Union[str, Any] = 10
        lowerCamelCase__: Any = self.dummy_model()
        lowerCamelCase__: int = self.dummy_sample_deter
        scheduler.set_timesteps(__a )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCamelCase__: Tuple = model(__a , __a )
            lowerCamelCase__: int = scheduler.step(__a , __a , __a ).prev_sample
        return sample

    def lowerCamelCase_ ( self : str ):
        """Check that step() preserves the sample shape at two consecutive timesteps."""
        lowerCamelCase__: int = dict(self.forward_default_kwargs )
        lowerCamelCase__: Optional[Any] = kwargs.pop("""num_inference_steps""" , __a )
        for scheduler_class in self.scheduler_classes:
            lowerCamelCase__: List[str] = self.get_scheduler_config()
            lowerCamelCase__: Optional[int] = scheduler_class(**__a )
            lowerCamelCase__: int = self.dummy_sample
            lowerCamelCase__: Optional[Any] = 0.1 * sample
            if num_inference_steps is not None and hasattr(__a , """set_timesteps""" ):
                scheduler.set_timesteps(__a )
            elif num_inference_steps is not None and not hasattr(__a , """set_timesteps""" ):
                lowerCamelCase__: str = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            lowerCamelCase__: List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
            lowerCamelCase__: Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
            lowerCamelCase__: List[str] = scheduler.timesteps[5]
            lowerCamelCase__: Dict = scheduler.timesteps[6]
            lowerCamelCase__: List[str] = scheduler.step(__a , __a , __a , **__a ).prev_sample
            lowerCamelCase__: List[Any] = scheduler.step(__a , __a , __a , **__a ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def lowerCamelCase_ ( self : Optional[Any] ):
        """Check DEIS results survive conversion to/from other multistep schedulers."""
        lowerCamelCase__: Optional[Any] = DEISMultistepScheduler(**self.get_scheduler_config() )
        lowerCamelCase__: Any = self.full_loop(scheduler=__a )
        lowerCamelCase__: Tuple = torch.mean(torch.abs(__a ) )
        assert abs(result_mean.item() - 0.23_916 ) < 1e-3
        lowerCamelCase__: int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        lowerCamelCase__: List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
        lowerCamelCase__: Dict = UniPCMultistepScheduler.from_config(scheduler.config )
        lowerCamelCase__: Optional[int] = DEISMultistepScheduler.from_config(scheduler.config )
        lowerCamelCase__: Tuple = self.full_loop(scheduler=__a )
        lowerCamelCase__: Optional[Any] = torch.mean(torch.abs(__a ) )
        assert abs(result_mean.item() - 0.23_916 ) < 1e-3

    def lowerCamelCase_ ( self : Any ):
        """Exercise several num_train_timesteps values."""
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=__a )

    def lowerCamelCase_ ( self : List[Any] ):
        """Exercise thresholding option combinations."""
        self.check_over_configs(thresholding=__a )
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=__a , prediction_type=__a , sample_max_value=__a , algorithm_type="""deis""" , solver_order=__a , solver_type=__a , )

    def lowerCamelCase_ ( self : List[Any] ):
        """Exercise both prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__a )

    def lowerCamelCase_ ( self : Optional[int] ):
        """Exercise solver order/type/prediction combinations and run a full loop for each."""
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=__a , solver_type=__a , prediction_type=__a , algorithm_type=__a , )
                        lowerCamelCase__: Dict = self.full_loop(
                            solver_order=__a , solver_type=__a , prediction_type=__a , algorithm_type=__a , )
                        assert not torch.isnan(__a ).any(), "Samples have nan numbers"

    def lowerCamelCase_ ( self : Optional[int] ):
        """Exercise both settings of lower_order_final."""
        self.check_over_configs(lower_order_final=__a )
        self.check_over_configs(lower_order_final=__a )

    def lowerCamelCase_ ( self : Dict ):
        """Exercise a range of inference step counts."""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=__a , time_step=0 )

    def lowerCamelCase_ ( self : List[str] ):
        """Full loop with the default config; pin the expected mean."""
        lowerCamelCase__: List[str] = self.full_loop()
        lowerCamelCase__: Dict = torch.mean(torch.abs(__a ) )
        assert abs(result_mean.item() - 0.23_916 ) < 1e-3

    def lowerCamelCase_ ( self : Optional[int] ):
        """Full loop with v_prediction; pin the expected mean."""
        lowerCamelCase__: str = self.full_loop(prediction_type="""v_prediction""" )
        lowerCamelCase__: Any = torch.mean(torch.abs(__a ) )
        assert abs(result_mean.item() - 0.091 ) < 1e-3

    def lowerCamelCase_ ( self : Dict ):
        """Run the loop in half precision and check the sample dtype is preserved."""
        lowerCamelCase__: int = self.scheduler_classes[0]
        lowerCamelCase__: Optional[int] = self.get_scheduler_config(thresholding=__a , dynamic_thresholding_ratio=0 )
        lowerCamelCase__: Union[str, Any] = scheduler_class(**__a )
        lowerCamelCase__: str = 10
        lowerCamelCase__: List[str] = self.dummy_model()
        lowerCamelCase__: Dict = self.dummy_sample_deter.half()
        scheduler.set_timesteps(__a )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCamelCase__: str = model(__a , __a )
            lowerCamelCase__: Union[str, Any] = scheduler.step(__a , __a , __a ).prev_sample
        # NOTE(review): ``torch.floataa`` does not exist — presumably torch.float16.
        assert sample.dtype == torch.floataa
| 306 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
a__ : Dict = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
a__ : List[str] = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
a__ : int = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric ):
    """Exact-match metric: percentage of predictions equal to their references.

    Fixes over the previous revision:
    * both methods were named ``_lowerCamelCase`` (the second shadowed the
      first); they are restored to ``_info`` / ``_compute``, the hook names
      the ``datasets.Metric`` base class actually calls;
    * every intermediate result was bound to ``_lowerCAmelCase``, so the
      regex/case/punctuation/number transforms were computed and discarded
      and the raw inputs were compared instead.
    """

    def _info(self):
        """Describe the metric's inputs and features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return {"exact_match": rate} with the rate scaled to 0.0-100.0."""
        if regexes_to_ignore is not None:
            # Strip each ignored pattern from both sides before comparing.
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, '', x) for x in predictions])
                references = np.array([re.sub(s, '', x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repmap = string.punctuation.maketrans('', '', string.punctuation)
            predictions = np.char.translate(predictions, table=repmap)
            references = np.char.translate(references, table=repmap)

        if ignore_numbers:
            repmap = string.digits.maketrans('', '', string.digits)
            predictions = np.char.translate(predictions, table=repmap)
            references = np.char.translate(references, table=repmap)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 1_0_0}
| 589 | 0 |
from __future__ import annotations
class snake_case_ :
    """A simple repeating single-byte XOR cipher.

    XOR is self-inverse, so each encrypt/decrypt pair performs the same
    operation; separate methods are kept for a symmetric API.

    Fixes over the previous revision:
    * all six methods shared one name (only the last definition survived);
      they are restored to ``encrypt``/``decrypt``/``encrypt_string``/
      ``decrypt_string``/``encrypt_file``/``decrypt_file`` — the names the
      commented-out usage examples at the bottom of this file call;
    * the result of ``key or self.__key or 1`` was bound to a throwaway
      local, so the key-fallback and key-reduction logic had no effect;
    * the constructor never stored its argument;
    * the asserts compared a value against itself.
    """

    def __init__(self, key: int = 0) -> None:
        """Store the fallback key used when a call passes key 0."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """XOR-encrypt *content*; returns one encrypted character per input char."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Inverse of :meth:`encrypt` (XOR is its own inverse)."""
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """XOR-encrypt *content* and return the result as a single string."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Inverse of :meth:`encrypt_string`."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt *file* line by line into 'encrypt.out'; False on I/O error."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt *file* line by line into 'decrypt.out'; False on I/O error."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful") | 240 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
# Module-level constants for the M2M100 tokenizer.
# Fix: all six constants were previously bound to the single name
# ``_UpperCAmelCase``, each assignment clobbering the last, while the
# tokenizer class below reads ``VOCAB_FILES_NAMES``,
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``, ``PRETRAINED_VOCAB_FILES_MAP``
# and ``FAIRSEQ_LANGUAGE_CODES`` — all of which were undefined (NameError).
logger = logging.get_logger(__name__)

# Marker SentencePiece uses for word-initial pieces.
SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
    'tokenizer_config_file': 'tokenizer_config.json',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
        'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
    },
    'spm_file': {
        'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
        'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_config_file': {
        'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
        'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/m2m100_418M': 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
    'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
# fmt: on

# Preserve the final binding left by the previous revision, in case any
# external code imported the language-code table under the obfuscated name.
_UpperCAmelCase = FAIRSEQ_LANGUAGE_CODES
class snake_case_ ( __lowercase ):
    """SentencePiece-based M2M100 tokenizer with per-language prefix tokens.

    NOTE(review): signs of mechanical renaming throughout this class:
    * the base-class name ``__lowercase`` is undefined in this module — the
      file imports ``PreTrainedTokenizer``, which is presumably the intended
      base; TODO confirm;
    * every class attribute is named ``A_`` (only the last binding survives);
    * ``__init__`` repeats the parameter name ``_snake_case`` many times
      (a SyntaxError), and method bodies bind results to ``__lowerCAmelCase``
      while later lines read names (``language_codes``, ``self.encoder``,
      ``self.sp_model`` ...) that are never assigned here.
    Documented as-is — confirm against the upstream transformers
    M2M100 tokenizer before relying on any claim below.
    """

    A_ = VOCAB_FILES_NAMES  # presumably vocab_files_names
    A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES  # presumably max_model_input_sizes; shadows the previous line
    A_ = PRETRAINED_VOCAB_FILES_MAP  # presumably pretrained_vocab_files_map
    A_ = ['input_ids', 'attention_mask']  # presumably model_input_names
    A_ = []  # presumably prefix_tokens
    A_ = []  # presumably suffix_tokens

    # NOTE(review): duplicate parameter name ``_snake_case`` below — SyntaxError.
    def __init__( self : List[Any] , _snake_case : Optional[Any] , _snake_case : Tuple , _snake_case : Optional[Any]=None , _snake_case : List[Any]=None , _snake_case : Dict="<s>" , _snake_case : List[str]="</s>" , _snake_case : Optional[Any]="</s>" , _snake_case : List[Any]="<pad>" , _snake_case : List[Any]="<unk>" , _snake_case : Dict="m2m100" , _snake_case : Optional[Dict[str, Any]] = None , _snake_case : Any=8 , **_snake_case : str , )->None:
        """Load the JSON vocab and SentencePiece model, and register every
        fairseq language code as an additional special token ``__xx__``."""
        __lowerCAmelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
        __lowerCAmelCase : Any = language_codes
        __lowerCAmelCase : Optional[Any] = FAIRSEQ_LANGUAGE_CODES[language_codes]
        __lowerCAmelCase : List[Any] = {lang_code: F'''__{lang_code}__''' for lang_code in fairseq_language_code}
        __lowerCAmelCase : Dict = kwargs.get("""additional_special_tokens""" , [] )
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(_snake_case )
            for lang_code in fairseq_language_code
            if self.get_lang_token(_snake_case ) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=_snake_case , tgt_lang=_snake_case , bos_token=_snake_case , eos_token=_snake_case , sep_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , language_codes=_snake_case , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=_snake_case , **_snake_case , )
        __lowerCAmelCase : int = vocab_file
        __lowerCAmelCase : Optional[int] = load_json(_snake_case )
        __lowerCAmelCase : int = {v: k for k, v in self.encoder.items()}
        __lowerCAmelCase : Tuple = spm_file
        __lowerCAmelCase : str = load_spm(_snake_case , self.sp_model_kwargs )
        __lowerCAmelCase : List[Any] = len(self.encoder )
        __lowerCAmelCase : int = {
            self.get_lang_token(_snake_case ): self.encoder_size + i for i, lang_code in enumerate(_snake_case )
        }
        __lowerCAmelCase : Union[str, Any] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(_snake_case )}
        __lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.lang_token_to_id.items()}
        __lowerCAmelCase : int = src_lang if src_lang is not None else """en"""
        __lowerCAmelCase : Dict = tgt_lang
        __lowerCAmelCase : Any = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )
        __lowerCAmelCase : List[Any] = num_madeup_words

    @property
    def UpperCAmelCase__ ( self : List[Any] )->int:
        """Vocabulary size = base encoder entries + language tokens."""
        return len(self.encoder ) + len(self.lang_token_to_id )

    @property
    def UpperCAmelCase__ ( self : Tuple )->str:
        """The current source-language code."""
        return self._src_lang

    @src_lang.setter
    def UpperCAmelCase__ ( self : List[Any] , _snake_case : str )->None:
        """Change the source language and refresh its special tokens."""
        __lowerCAmelCase : Any = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def UpperCAmelCase__ ( self : Tuple , _snake_case : str )->List[str]:
        """Tokenize text with the SentencePiece model."""
        return self.sp_model.encode(_snake_case , out_type=_snake_case )

    def UpperCAmelCase__ ( self : Union[str, Any] , _snake_case : str )->Dict:
        """Token -> id, checking language tokens first, then the vocab (unk fallback)."""
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(_snake_case , self.encoder[self.unk_token] )

    def UpperCAmelCase__ ( self : Any , _snake_case : int )->str:
        """Id -> token, checking language ids first, then the decoder (unk fallback)."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(_snake_case , self.unk_token )

    def UpperCAmelCase__ ( self : Union[str, Any] , _snake_case : List[str] )->Tuple:
        """Join tokens back into a string, decoding runs between special tokens."""
        __lowerCAmelCase : int = []
        __lowerCAmelCase : Any = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(_snake_case ) + token
                __lowerCAmelCase : Optional[Any] = []
            else:
                current_sub_tokens.append(_snake_case )
        out_string += self.sp_model.decode(_snake_case )
        return out_string.strip()

    def UpperCAmelCase__ ( self : Optional[Any] , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False )->List[int]:
        """Return a 0/1 mask marking the special (prefix/suffix) token positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case )
        __lowerCAmelCase : str = [1] * len(self.prefix_tokens )
        __lowerCAmelCase : Optional[int] = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(_snake_case )) + suffix_ones
        return prefix_ones + ([0] * len(_snake_case )) + ([0] * len(_snake_case )) + suffix_ones

    def UpperCAmelCase__ ( self : Any , _snake_case : List[int] , _snake_case : Optional[List[int]] = None )->List[int]:
        """Wrap token ids with the language prefix and EOS suffix tokens."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def UpperCAmelCase__ ( self : Optional[int] )->Dict:
        """Full vocab dict (base vocab + added tokens)."""
        __lowerCAmelCase : Dict = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : Optional[Any] )->Dict:
        """Drop the (unpicklable) SentencePiece processor before pickling."""
        __lowerCAmelCase : Optional[int] = self.__dict__.copy()
        __lowerCAmelCase : Dict = None
        return state

    def __setstate__( self : Optional[int] , _snake_case : Dict )->None:
        """Restore state and reload the SentencePiece processor from disk."""
        __lowerCAmelCase : str = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            __lowerCAmelCase : Dict = {}
        __lowerCAmelCase : Optional[Any] = load_spm(self.spm_file , self.sp_model_kwargs )

    def UpperCAmelCase__ ( self : Union[str, Any] , _snake_case : str , _snake_case : Optional[str] = None )->Tuple[str]:
        """Write vocab JSON and SentencePiece model into the given directory."""
        __lowerCAmelCase : List[str] = Path(_snake_case )
        if not save_dir.is_dir():
            raise OSError(F'''{save_directory} should be a directory''' )
        __lowerCAmelCase : int = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
        )
        __lowerCAmelCase : int = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
        )
        save_json(self.encoder , _snake_case )
        if os.path.abspath(self.spm_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , _snake_case )
        elif not os.path.isfile(self.spm_file ):
            with open(_snake_case , """wb""" ) as fi:
                __lowerCAmelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
                fi.write(_snake_case )
        return (str(_snake_case ), str(_snake_case ))

    def UpperCAmelCase__ ( self : Dict , _snake_case : List[str] , _snake_case : str = "en" , _snake_case : Optional[List[str]] = None , _snake_case : str = "ro" , **_snake_case : List[Any] , )->BatchEncoding:
        """Prepare a seq2seq batch after switching to the requested source language."""
        __lowerCAmelCase : Optional[int] = src_lang
        __lowerCAmelCase : Optional[Any] = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seqaseq_batch(_snake_case , _snake_case , **_snake_case )

    def UpperCAmelCase__ ( self : str , _snake_case : Tuple , _snake_case : Optional[str] , _snake_case : Optional[str] , **_snake_case : Optional[int] )->Optional[Any]:
        """Build translation inputs, attaching the target-language id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        __lowerCAmelCase : Tuple = src_lang
        __lowerCAmelCase : Dict = self(_snake_case , add_special_tokens=_snake_case , **_snake_case )
        __lowerCAmelCase : List[str] = self.get_lang_id(_snake_case )
        __lowerCAmelCase : Optional[int] = tgt_lang_id
        return inputs

    def UpperCAmelCase__ ( self : Any )->str:
        """Switch special tokens to source-language mode (generation input side)."""
        self.set_src_lang_special_tokens(self.src_lang )

    def UpperCAmelCase__ ( self : Optional[Any] )->int:
        """Switch special tokens to target-language mode (generation target side)."""
        self.set_tgt_lang_special_tokens(self.tgt_lang )

    def UpperCAmelCase__ ( self : Union[str, Any] , _snake_case : str )->None:
        """Set prefix=[src-lang id] and suffix=[eos] for the given source language."""
        __lowerCAmelCase : Union[str, Any] = self.get_lang_token(_snake_case )
        __lowerCAmelCase : Union[str, Any] = self.lang_token_to_id[lang_token]
        __lowerCAmelCase : int = [self.cur_lang_id]
        __lowerCAmelCase : Tuple = [self.eos_token_id]

    def UpperCAmelCase__ ( self : str , _snake_case : str )->None:
        """Set prefix=[tgt-lang id] and suffix=[eos] for the given target language."""
        __lowerCAmelCase : Union[str, Any] = self.get_lang_token(_snake_case )
        __lowerCAmelCase : Dict = self.lang_token_to_id[lang_token]
        __lowerCAmelCase : Union[str, Any] = [self.cur_lang_id]
        __lowerCAmelCase : Union[str, Any] = [self.eos_token_id]

    def UpperCAmelCase__ ( self : int , _snake_case : str )->str:
        """Language code -> its ``__xx__`` token."""
        return self.lang_code_to_token[lang]

    def UpperCAmelCase__ ( self : Dict , _snake_case : str )->int:
        """Language code -> the id of its ``__xx__`` token."""
        __lowerCAmelCase : List[Any] = self.get_lang_token(_snake_case )
        return self.lang_token_to_id[lang_token]
def _SCREAMING_SNAKE_CASE ( path: str , sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    """Load and return a SentencePiece processor from the model file at *path*.

    Fixes: both parameters were named ``SCREAMING_SNAKE_CASE`` (duplicate
    argument names are a SyntaxError), and the constructed processor was
    bound to a throwaway name while the following lines read ``spm``,
    which was undefined.
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :str ) -> Union[Dict, List]:
with open(SCREAMING_SNAKE_CASE , """r""" ) as f:
return json.load(SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str ) -> None:
with open(SCREAMING_SNAKE_CASE , """w""" ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , indent=2 ) | 240 | 1 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
# Fix: both constants were previously bound to the single name
# ``lowerCAmelCase__`` (the device string clobbered the logger), while the
# function below reads ``logger`` and defaults its device parameter to
# ``DEFAULT_DEVICE`` — both of which were undefined.
logger = getLogger(__name__)

DEFAULT_DEVICE = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def _A (
    examples,                       # list of input texts to summarize/translate
    out_file,                       # path; one generated hypothesis per line
    model_name,                     # checkpoint name or path
    batch_size = 8,
    device = DEFAULT_DEVICE,
    fpaa=False,                     # run the model in half precision
    task="summarization",           # used to pick task-specific generate params
    prefix=None,                    # text prepended to every example (e.g. T5 task prefix)
    **generate_kwargs,
):
    """Generate model outputs for *examples*, write them to *out_file*, and
    return simple runtime statistics.

    Fixes over the previous revision: every parameter was named ``A__``
    (duplicate argument names are a SyntaxError) and every local was bound to
    the single name ``__lowercase``, so ``fout``, ``model``, ``tokenizer`` and
    ``prefix`` were undefined where the body read them. Names are restored to
    match the identifiers the body itself still referenced (``fpaa``,
    ``prefix``, ``model`` ...).

    Returns:
        dict with ``n_obs``, ``runtime`` (seconds) and ``seconds_per_sample``.
    """
    fout = Path(out_file ).open('''w''' , encoding='''utf-8''' )
    model_name = str(model_name )
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name ).to(device )
    if fpaa:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" )  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model , task )
    if prefix is None:
        prefix = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
    for examples_chunk in tqdm(list(chunks(examples , batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk , return_tensors='''pt''' , truncation=True , padding='''longest''' ).to(device )
        summaries = model.generate(
            input_ids=batch.input_ids , attention_mask=batch.attention_mask , **generate_kwargs , )
        dec = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        for hypothesis in dec:
            fout.write(hypothesis + '''\n''' )
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time )  # seconds
    n_obs = len(examples )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def _A ( ):
"""simple docstring"""
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def run_generate(verbose=True):
    """CLI entry point: parse args, run generation, optionally score the output.

    Args:
        verbose: print parsed kwargs and the final scores.

    Returns:
        dict of metric scores (empty dict when no --reference_path is given).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    # default mirrors DEFAULT_DEVICE without depending on the module constant
    parser.add_argument(
        "--device",
        type=str,
        required=False,
        default="cuda" if torch.cuda.is_available() else "cpu",
        help="cuda, cuda:1, cpu etc.",
    )
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    # T5-style models expect a leading space before the source text
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    # NOTE(review): no function named `run_generate` is defined above under
    # that name (the defs were renamed to `_A`) — confirm the entry point
    # actually resolves before shipping.
    run_generate(verbose=True)
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
A_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    """Word error rate metric backed by jiwer's `compute_measures`."""

    def _info(self):
        # `datasets.Metric` requires `_info` (the previous name clobbered it,
        # leaving the metric without a feature spec).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        """Return the corpus-level WER; jiwer expects (truth, hypothesis) order."""
        if concatenate_texts:
            # Score everything as one long pair of texts.
            return compute_measures(references, predictions)["wer"]
        # Accumulate raw error counts per pair and divide once at the end,
        # which weights every reference word equally across the corpus.
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = compute_measures(reference, prediction)
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 29 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# The logger and the archive map were previously assigned to the same name,
# so the second assignment discarded the logger. Keep them distinct.
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for a ViViT (video vision transformer) model.

    All arguments are optional; the defaults correspond to the
    ViViT-B/16x2 Kinetics-400 checkpoint.
    """

    # identifies the model family to the `PretrainedConfig` machinery
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,          # height/width of each input frame
        num_frames=32,           # frames sampled per video clip
        tubelet_size=[2, 16, 16],  # (time, height, width) patch size
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """ConfigTester variant for MobileNetV2 (referenced by name in setUp below)."""

    def create_and_test_config_common_properties(self):
        # MobileNetV2 configs lack the usual common properties, so only check
        # the attributes this model family actually defines.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    """Builds small MobileNetV2 configs/inputs and runs shape checks for the tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        # without finegrained output the final conv width is depth-scaled
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        # Random pixel values plus (optional) classification/segmentation labels.
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # spatial dims shrink by the output stride
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV2 does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the integration tests below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end checks against the released MobileNetV2 checkpoints."""

    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
# Both constants were bound to the same name, clobbering the first; the size
# arithmetic below multiplies NUM_SHARDS * NUM_ITEMS_PER_SHARD.
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
    """Raised (as FailedTestError, see below) when a rank sees the wrong example count."""

    pass
def gen(shards: List[str]):
    """Yield NUM_ITEMS_PER_SHARD examples per shard, tagging each with its shard name."""
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    """Verify `split_dataset_by_node` gives this rank the expected number of examples.

    Expects RANK and WORLD_SIZE in the environment (set by torch.distributed
    launchers); raises FailedTestError on a mismatch.
    """
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        # materialize to test the map-style Dataset code path as well
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    # ranks below the remainder get one extra example
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase : List[Any] =logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute (x0, y0, x1, y1) pixel box to the 0-1000 range LayoutLMv2 expects."""
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Apply Tesseract OCR to a document image.

    Returns:
        (words, normalized_boxes): recognized words and their bounding boxes
        scaled to the 0-1000 range.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class __a(BaseImageProcessor):
    r"""
    Constructs a LayoutLMv2 image processor: resizes document images, optionally
    runs Tesseract OCR on them, and flips RGB to BGR for the visual backbone.
    """

    # the key the processor emits for the model's forward pass
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize one image to ``size`` ({"height": h, "width": w})."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # `resize` here is the module-level transform, not this method
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess a batch of images; per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
# Guard against fairseq versions the conversion script cannot handle.
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
# NOTE(review): the same name is rebound three times below, so only the last
# value ('en_XX') survives; the logger and the sample text are lost. These look
# like they should be three distinct constants (logger, SAMPLE_TEXT,
# SAMPLE_LANGUAGE) — confirm against the call sites in the conversion function.
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = '''Hello, World!'''
UpperCAmelCase = '''en_XX'''
def A ( xmod_checkpoint_path : str , pytorch_dump_folder_path : str , classification_head : bool ):
    """Convert a fairseq X-MOD checkpoint into a Hugging Face ``Xmod`` model.

    Fixes over the original (mangled) body: the three parameters all shared
    the name ``A_`` (a SyntaxError), every intermediate result was bound to
    the throwaway name ``snake_case`` while later lines read undefined names
    (``_A``, ``data_dir``, ``config``, ``model``, ``SAMPLE_LANGUAGE`` ...),
    and none of the copied weights were actually assigned onto the target
    model.

    Args:
        xmod_checkpoint_path: path to the official fairseq checkpoint file.
        pytorch_dump_folder_path: directory the converted model is saved to.
        classification_head: convert the "mnli" classification head instead
            of the masked-LM head.
    """
    # Sample inputs used below to verify both models produce identical outputs
    # (kept local so this function is self-contained).
    sample_text = "Hello, World!"
    sample_language = "en_XX"

    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code in xmod_layer.adapter_modules.keys():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(sample_text).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(sample_language)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[sample_language])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


# Compatibility alias: the ``__main__`` block of this script invokes the
# conversion under this (un-mangled) name.
convert_xmod_checkpoint_to_pytorch = A
if __name__ == "__main__":
    # Fixes: the original bound the parser and the parsed arguments to the
    # throwaway name ``UpperCAmelCase`` while reading the undefined names
    # ``parser`` / ``args``, and it called the undefined
    # ``convert_xmod_checkpoint_to_pytorch`` -- the conversion function
    # defined in this module is ``A``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    A(args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head)
| 719 |
'''simple docstring'''
import os
import sys
# Make the bundled ``src`` directory importable so the local ``transformers``
# package imported below wins over any installed copy.
UpperCAmelCase = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(UpperCAmelCase)  # fix: the original appended the undefined name SRC_DIR
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
# Names of the runtime dependencies expected by the bundled ``transformers``
# source tree.
# NOTE(review): this assignment rebinds the module-level name
# ``UpperCAmelCase`` (previously the src path set above); in the original
# module this list presumably had its own name -- confirm nothing still
# relies on the earlier binding.
UpperCAmelCase = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def A ( *args , **kwargs ):
    """Shortcut for ``AutoConfig.from_pretrained``.

    Fix: the original declared both the var-positional and var-keyword
    parameters as ``A_``, a duplicate-argument SyntaxError.
    """
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def A ( *args , **kwargs ):
    """Shortcut for ``AutoTokenizer.from_pretrained``.

    Fix: the original declared both the var-positional and var-keyword
    parameters as ``A_``, a duplicate-argument SyntaxError.
    """
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def A ( *args , **kwargs ):
    """Shortcut for ``AutoModel.from_pretrained``.

    Fix: the original declared both the var-positional and var-keyword
    parameters as ``A_``, a duplicate-argument SyntaxError.
    """
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def A ( *args , **kwargs ):
    """Shortcut for ``AutoModelForCausalLM.from_pretrained``.

    Fix: the original declared both the var-positional and var-keyword
    parameters as ``A_``, a duplicate-argument SyntaxError.
    """
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def A ( *args , **kwargs ):
    """Shortcut for ``AutoModelForMaskedLM.from_pretrained``.

    Fix: the original declared both the var-positional and var-keyword
    parameters as ``A_``, a duplicate-argument SyntaxError.
    """
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def A ( *args , **kwargs ):
    """Shortcut for ``AutoModelForSequenceClassification.from_pretrained``.

    Fix: the original declared both the var-positional and var-keyword
    parameters as ``A_``, a duplicate-argument SyntaxError.
    """
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def A ( *args , **kwargs ):
    """Shortcut for ``AutoModelForQuestionAnswering.from_pretrained``.

    Fix: the original declared both the var-positional and var-keyword
    parameters as ``A_``, a duplicate-argument SyntaxError.
    """
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
| 555 | 0 |
"""simple docstring"""
from math import sqrt
def SCREAMING_SNAKE_CASE_(snake_case: int) -> int:
    """Return the sum of the proper divisors of ``snake_case``.

    Fixes over the original: the body read the undefined names ``n`` and
    ``snake_case_`` instead of the parameter, the float ``sqrt`` comparison
    is replaced by exact integer arithmetic, and the bogus ``-> Tuple``
    annotation is corrected (an ``int`` is returned).
    """
    total = 0
    divisor = 1
    # Walk divisor pairs (d, n // d) up to the integer square root.
    while divisor * divisor <= snake_case:
        if snake_case % divisor == 0:
            total += divisor
            complement = snake_case // divisor
            if complement != divisor:  # avoid double-counting a square root
                total += complement
        divisor += 1
    # divisor 1 always contributed ``snake_case`` itself (as 1's complement);
    # subtract it so only *proper* divisors are counted.
    return total - snake_case
def SCREAMING_SNAKE_CASE_(snake_case: int = 10_000) -> int:
    """Return the sum of all amicable numbers strictly below ``snake_case``.

    A number ``a`` is amicable when ``d(d(a)) == a`` and ``d(a) != a``, where
    ``d`` is the proper-divisor sum (Project Euler problem 21).

    Fixes over the original: the body read the undefined names
    ``snake_case_`` and ``sum_of_divisors``; a private helper now supplies the
    proper-divisor sum so the function is self-contained, and the bogus
    ``-> Tuple`` annotation is corrected.
    """

    def _proper_divisor_sum(value: int) -> int:
        # Sum of the divisors of ``value`` excluding ``value`` itself.
        total = 0
        d = 1
        while d * d <= value:
            if value % d == 0:
                total += d
                other = value // d
                if other != d:
                    total += other
            d += 1
        return total - value

    return sum(
        i
        for i in range(1, snake_case)
        if _proper_divisor_sum(_proper_divisor_sum(i)) == i and _proper_divisor_sum(i) != i
    )
if __name__ == "__main__":
    # Fix: the original called the undefined name ``solution``; the search
    # entry point defined in this module is ``SCREAMING_SNAKE_CASE_``.
    print(SCREAMING_SNAKE_CASE_(int(str(input()).strip())))
| 650 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class _lowerCamelCase(TaskTemplate):
    """Task template describing a text-classification dataset.

    Fixes over the original (mangled) class:
    * the base class and the ``frozen`` flag used the undefined name
      ``UpperCamelCase_`` -- the imported ``TaskTemplate`` and ``True`` are
      substituted;
    * all five class attributes were bound to the single name ``__a`` (each
      assignment overwrote the previous one), so ``self.text_column`` /
      ``self.label_column`` / ``self.label_schema`` read by the methods did
      not exist;
    * the alignment method assigned its results to throwaway locals and
      returned the undefined name ``task_template``. It is restored under the
      name ``align_with_features`` (its mangled name ``UpperCamelCase_`` was
      shadowed by the property below and therefore unreachable).
    """

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the
        ``ClassLabel`` feature found in ``features``.

        Raises:
            ValueError: if ``label_column`` is missing from ``features`` or is
                not a ``ClassLabel``.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so update the copy through __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def UpperCamelCase_(self) -> Dict[str, str]:
        # Maps the dataset's column names onto the names this task expects.
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 64 | 0 |
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def __snake_case(k_size, sigma):
    """Build a ``k_size`` x ``k_size`` Gaussian kernel for the given ``sigma``.

    Fixes over the original: both parameters were declared with the same name
    (a duplicate-argument SyntaxError), the coordinate grids from ``mgrid``
    were never unpacked, and the kernel formula referenced the undefined name
    ``__lowerCAmelCase``. The bogus ``-> int`` annotation is dropped (a 2-D
    array is returned).
    """
    center = k_size // 2
    # Coordinate grids centred on the kernel middle: values run from
    # -center .. k_size - center - 1 along each axis.
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def __snake_case(image, k_size, sigma):
    """Convolve ``image`` with a ``k_size`` x ``k_size`` Gaussian kernel
    ("valid" mode) and return the result as an unsigned-8-bit array.

    Fixes over the original: the three parameters shared one name (a
    duplicate-argument SyntaxError), every intermediate was assigned to the
    throwaway local ``A`` while later lines read undefined names, the kernel
    came from the undefined ``gen_gaussian_kernel`` (inlined below as a
    private helper), and the result was cast with the undefined ``uinta`` --
    the ``"uint8"`` dtype string is used instead.
    """

    def _kernel(size, sig):
        # Same Gaussian kernel this module's generator function builds.
        center = size // 2
        x, y = mgrid[0 - center : size - center, 0 - center : size - center]
        return 1 / (2 * pi * sig) * exp(-(square(x) + square(y)) / (2 * square(sig)))

    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col: turn each k_size*k_size window into one row and stack all rows.
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        image_array[row, :] = ravel(image[i : i + k_size, j : j + k_size])
        row += 1

    # Flatten the kernel to shape (k*k,) and apply it to every window at once.
    filter_array = ravel(_kernel(k_size, sigma))
    return dot(image_array, filter_array).reshape(dst_height, dst_width).astype("uint8")
if __name__ == "__main__":
    # Demo: apply the Gaussian filter to the Lena test image with two kernel
    # sizes and display the results.
    # NOTE(review): ``imread``/``cvtColor``/``imshow``/``waitKey`` come from
    # the module ``cva`` imported above (presumably a mangled ``cv2``), and
    # ``gaussian_filter`` is undefined here -- the filter defined in this
    # module is bound to ``__snake_case``. Likewise ``img``/``gray``/
    # ``gaussianaxa`` are read below while every result above is bound to
    # ``UpperCamelCase`` -- the assignment targets were presumably lost in
    # mangling. Confirm the intended names before running.
    # read original image
    UpperCamelCase : Dict = imread(R"../image_data/lena.jpg")
    # turn image in gray scale value
    UpperCamelCase : int = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    UpperCamelCase : List[Any] = gaussian_filter(gray, 3, sigma=1)
    UpperCamelCase : Any = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("gaussian filter with 3x3 mask", gaussianaxa)
    imshow("gaussian filter with 5x5 mask", gaussianaxa)
    waitKey()
| 718 |
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
    """Unit tests for ``TransfoXLTokenizer``.

    NOTE(review): the base name ``UpperCAmelCase_`` is undefined in this
    module; it is presumably a mangled ``TokenizerTesterMixin`` (imported
    above) -- confirm before running.
    """

    # NOTE(review): the three assignments below all rebind the single class
    # attribute ``lowerCAmelCase`` (only the last one survives); in the
    # original these were presumably the tokenizer class plus two mixin
    # boolean flags -- confirm against the upstream test file.
    lowerCAmelCase = TransfoXLTokenizer
    lowerCAmelCase = False
    lowerCAmelCase = False

    # NOTE(review): every method below is named ``__a``, so within the class
    # each definition overwrites the previous one (only the last remains
    # callable); the original presumably used distinct names (setUp,
    # get_tokenizer, get_input_output_texts, ...). The locals bound to ``A``
    # are throwaway -- later lines read names (``vocab_tokens``,
    # ``tokenizer``, ``input_text`` ...) that were presumably the original
    # assignment targets lost in mangling.

    def __a ( self : Union[str, Any] ):
        # Write a tiny vocabulary file into the mixin-provided tmp dir.
        super().setUp()
        A = [
            '<unk>',
            '[CLS]',
            '[SEP]',
            'want',
            'unwanted',
            'wa',
            'un',
            'running',
            ',',
            'low',
            'l',
        ]
        A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )

    def __a ( self : int , **_lowercase : List[str] ):
        # Factory: build a tokenizer from the vocabulary written in setUp.
        A = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **_lowercase )

    def __a ( self : int , _lowercase : List[Any] ):
        # Provide an (input, expected decoded output) pair for shared tests.
        A = '<unk> UNwanted , running'
        A = '<unk> unwanted, running'
        return input_text, output_text

    def __a ( self : List[str] ):
        # Lower-cased tokenization maps differently-cased words onto the vocab.
        A = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=_lowercase )
        A = tokenizer.tokenize('<unk> UNwanted , running' )
        self.assertListEqual(_lowercase , ['<unk>', 'unwanted', ',', 'running'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [0, 4, 8, 7] )

    def __a ( self : int ):
        # Whitespace is collapsed and text is lower-cased.
        A = TransfoXLTokenizer(lower_case=_lowercase )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )

    def __a ( self : Optional[Any] ):
        # Original casing is preserved when lower-casing is disabled.
        A = TransfoXLTokenizer(lower_case=_lowercase )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )

    def __a ( self : str ):
        # Moses-style punctuation/number handling: digits keep @,@ / @.@
        # markers and tokenize/convert_tokens_to_string round-trip each other.
        A = TransfoXLTokenizer(lower_case=_lowercase )
        A = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
        A = [
            'Hello',
            '(',
            'bracket',
            ')',
            'and',
            'side',
            '@-@',
            'scrolled',
            '[',
            'and',
            ']',
            'Henry',
            '\'s',
            '$',
            '5',
            '@,@',
            '000',
            'with',
            '3',
            '@.@',
            '34',
            'm',
            '.',
            'What',
            '\'s',
            'up',
            '!',
            '?',
        ]
        self.assertListEqual(tokenizer.tokenize(_lowercase ) , _lowercase )
        self.assertEqual(tokenizer.convert_tokens_to_string(_lowercase ) , _lowercase )

    def __a ( self : Dict ):
        # move_added_token relocates a token to a given id without copying it.
        A = self.get_tokenizer()
        A = len(_lowercase )
        tokenizer.add_tokens(['new1', 'new2'] )
        tokenizer.move_added_token('new1' , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(_lowercase ) , original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode('new1' ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , 'new1' )
| 91 | 0 |
from __future__ import annotations
_snake_case = list[list[int]]
# assigning initial values to the grid
_snake_case = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
_snake_case = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def A ( _lowerCamelCase ):
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def A ( _lowerCamelCase ):
'''simple docstring'''
if location := find_empty_location(_lowerCamelCase ):
_lowerCAmelCase , _lowerCAmelCase : int = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : List[str] = digit
if sudoku(_lowerCamelCase ) is not None:
return grid
_lowerCAmelCase : Tuple = 0
return None
def A ( _lowerCamelCase ):
'''simple docstring'''
for row in grid:
for cell in row:
print(_lowerCamelCase , end=" " )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
_snake_case = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 500 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( a):
lowerCamelCase__ = (UniPCMultistepScheduler,)
lowerCamelCase__ = (('num_inference_steps', 25),)
def snake_case__ ( self, **__a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {
"num_train_timesteps": 1000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__a)
return config
def snake_case__ ( self, __a=0, **__a):
'''simple docstring'''
_lowerCAmelCase : str = dict(self.forward_default_kwargs)
_lowerCAmelCase : Union[str, Any] = kwargs.pop("num_inference_steps", __a)
_lowerCAmelCase : List[str] = self.dummy_sample
_lowerCAmelCase : List[Any] = 0.1 * sample
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : List[str] = self.get_scheduler_config(**__a)
_lowerCAmelCase : List[str] = scheduler_class(**__a)
scheduler.set_timesteps(__a)
# copy over dummy past residuals
_lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__a)
_lowerCAmelCase : Optional[Any] = scheduler_class.from_pretrained(__a)
new_scheduler.set_timesteps(__a)
# copy over dummy past residuals
_lowerCAmelCase : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase , _lowerCAmelCase : Dict = sample, sample
for t in range(__a, time_step + scheduler.config.solver_order + 1):
_lowerCAmelCase : Dict = scheduler.step(__a, __a, __a, **__a).prev_sample
_lowerCAmelCase : int = new_scheduler.step(__a, __a, __a, **__a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self, __a=0, **__a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs)
_lowerCAmelCase : List[str] = kwargs.pop("num_inference_steps", __a)
_lowerCAmelCase : Dict = self.dummy_sample
_lowerCAmelCase : List[str] = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : str = scheduler_class(**__a)
scheduler.set_timesteps(__a)
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__a)
_lowerCAmelCase : Tuple = scheduler_class.from_pretrained(__a)
# copy over dummy past residuals
new_scheduler.set_timesteps(__a)
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : Any = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : List[Any] = scheduler.step(__a, __a, __a, **__a).prev_sample
_lowerCAmelCase : Optional[Any] = new_scheduler.step(__a, __a, __a, **__a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self, __a=None, **__a):
'''simple docstring'''
if scheduler is None:
_lowerCAmelCase : List[Any] = self.scheduler_classes[0]
_lowerCAmelCase : List[Any] = self.get_scheduler_config(**__a)
_lowerCAmelCase : str = scheduler_class(**__a)
_lowerCAmelCase : str = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config(**__a)
_lowerCAmelCase : Optional[Any] = scheduler_class(**__a)
_lowerCAmelCase : int = 10
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(__a)
for i, t in enumerate(scheduler.timesteps):
_lowerCAmelCase : Tuple = model(__a, __a)
_lowerCAmelCase : Union[str, Any] = scheduler.step(__a, __a, __a).prev_sample
return sample
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs)
_lowerCAmelCase : List[str] = kwargs.pop("num_inference_steps", __a)
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : int = self.get_scheduler_config()
_lowerCAmelCase : str = scheduler_class(**__a)
_lowerCAmelCase : List[str] = self.dummy_sample
_lowerCAmelCase : str = 0.1 * sample
if num_inference_steps is not None and hasattr(__a, "set_timesteps"):
scheduler.set_timesteps(__a)
elif num_inference_steps is not None and not hasattr(__a, "set_timesteps"):
_lowerCAmelCase : str = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
_lowerCAmelCase : Dict = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : Optional[int] = scheduler.timesteps[5]
_lowerCAmelCase : Tuple = scheduler.timesteps[6]
_lowerCAmelCase : int = scheduler.step(__a, __a, __a, **__a).prev_sample
_lowerCAmelCase : Optional[Any] = scheduler.step(__a, __a, __a, **__a).prev_sample
self.assertEqual(output_a.shape, sample.shape)
self.assertEqual(output_a.shape, output_a.shape)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = UniPCMultistepScheduler(**self.get_scheduler_config())
_lowerCAmelCase : Optional[Any] = self.full_loop(scheduler=__a)
_lowerCAmelCase : List[str] = torch.mean(torch.abs(__a))
assert abs(result_mean.item() - 0.2_464) < 1E-3
_lowerCAmelCase : Tuple = DPMSolverSinglestepScheduler.from_config(scheduler.config)
_lowerCAmelCase : Any = DEISMultistepScheduler.from_config(scheduler.config)
_lowerCAmelCase : Any = DPMSolverMultistepScheduler.from_config(scheduler.config)
_lowerCAmelCase : Optional[Any] = UniPCMultistepScheduler.from_config(scheduler.config)
_lowerCAmelCase : Any = self.full_loop(scheduler=__a)
_lowerCAmelCase : Any = torch.mean(torch.abs(__a))
assert abs(result_mean.item() - 0.2_464) < 1E-3
def snake_case__ ( self):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__a)
def snake_case__ ( self):
'''simple docstring'''
self.check_over_configs(thresholding=__a)
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__a, prediction_type=__a, sample_max_value=__a, solver_order=__a, solver_type=__a, )
def snake_case__ ( self):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a)
def snake_case__ ( self):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__a, solver_type=__a, prediction_type=__a, )
_lowerCAmelCase : Optional[int] = self.full_loop(
solver_order=__a, solver_type=__a, prediction_type=__a, )
assert not torch.isnan(__a).any(), "Samples have nan numbers"
def snake_case__ ( self):
'''simple docstring'''
self.check_over_configs(lower_order_final=__a)
self.check_over_configs(lower_order_final=__a)
def snake_case__ ( self):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__a, time_step=0)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.full_loop()
_lowerCAmelCase : str = torch.mean(torch.abs(__a))
assert abs(result_mean.item() - 0.2_464) < 1E-3
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.full_loop(prediction_type="v_prediction")
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(__a))
assert abs(result_mean.item() - 0.1_014) < 1E-3
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
_lowerCAmelCase : Union[str, Any] = self.get_scheduler_config(thresholding=__a, dynamic_thresholding_ratio=0)
_lowerCAmelCase : Optional[int] = scheduler_class(**__a)
_lowerCAmelCase : str = 10
_lowerCAmelCase : List[str] = self.dummy_model()
_lowerCAmelCase : Tuple = self.dummy_sample_deter.half()
scheduler.set_timesteps(__a)
for i, t in enumerate(scheduler.timesteps):
_lowerCAmelCase : Dict = model(__a, __a)
_lowerCAmelCase : List[str] = scheduler.step(__a, __a, __a).prev_sample
assert sample.dtype == torch.floataa
def snake_case__ ( self, **__a):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config(**__a)
_lowerCAmelCase : List[Any] = scheduler_class(**__a)
scheduler.set_timesteps(scheduler.config.num_train_timesteps)
assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 500 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class _UpperCamelCase (a_ ):
snake_case_ = ["""pixel_values"""]
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BICUBIC , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_5_5 , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
__lowerCAmelCase = size if size is not None else {"height": 2_2_4, "width": 2_2_4}
__lowerCAmelCase = get_size_dict(__UpperCamelCase )
__lowerCAmelCase = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
__lowerCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase , param_name="crop_size" )
__lowerCAmelCase = do_resize
__lowerCAmelCase = do_rescale
__lowerCAmelCase = do_normalize
__lowerCAmelCase = do_center_crop
__lowerCAmelCase = crop_size
__lowerCAmelCase = size
__lowerCAmelCase = resample
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
__lowerCAmelCase = get_size_dict(__UpperCamelCase )
if "shortest_edge" in size:
__lowerCAmelCase = get_resize_output_image_size(__UpperCamelCase , size=size["shortest_edge"] , default_to_square=__UpperCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
__lowerCAmelCase = (size["height"], size["width"])
else:
raise ValueError(F"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
__lowerCAmelCase = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase )-> np.ndarray:
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> BatchFeature:
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase = get_size_dict(__UpperCamelCase , param_name="crop_size" , default_to_square=__UpperCamelCase )
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase = image_std if image_std is not None else self.image_std
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(__UpperCamelCase )
if not is_batched(__UpperCamelCase ):
__lowerCAmelCase = [images]
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
__lowerCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_center_crop:
__lowerCAmelCase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
__lowerCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
__lowerCAmelCase = {"pixel_values": images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 290 |
import os
import sys
import unittest


# Repository root (three levels up from this test file); lets us import the
# `check_dummies` utility that lives under <repo>/utils.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path.
# NOTE(review): the original bound this path to a throwaway local, so the
# override never happened; `PATH_TO_DIFFUSERS` is the attribute name used by
# the diffusers `check_dummies` script — TODO confirm against utils/check_dummies.py.
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class _UpperCamelCase(unittest.TestCase):
    """Unit tests for the `check_dummies` utility.

    NOTE(review): method names are restored to the ``test_*`` convention —
    the original gave every method one shared non-test name, so unittest
    never discovered (or even retained) any of them. Expected strings are
    kept byte-identical to the original; their internal indentation looks
    collapsed and may need re-checking against ``check_dummies`` output.
    """

    def test_find_backend(self):
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 290 | 1 |
# Deprecation shim: re-export StableDiffusionInpaintPipeline from `diffusers`
# so legacy imports of this script keep working, and warn once at import time.
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
| 89 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __SCREAMING_SNAKE_CASE(ProcessorMixin):
    """Processor pairing a ViT image processor with a CLIP tokenizer.

    Supports three kinds of input: ``text`` (tokenized), ``visual_prompt``
    (a conditional image) and ``images``; returns the matching encoding.

    NOTE(review): the original had duplicate parameter names (a syntax
    error), an undefined base class and colliding ``_a`` attribute/method
    names; these are restored to the names `ProcessorMixin` requires.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        # Fall back to the deprecated argument if no image processor was given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        """Encode text and/or images; exactly one prompt type is allowed."""
        if text is None and visual_prompt is None and images is None:
            raise ValueError('You have to specify either text, visual prompt or images.')
        if text is not None and visual_prompt is not None:
            raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                'pixel_values': image_features.pixel_values,
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
| 536 | 0 |
'''simple docstring'''
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random rounds.

    Returns True if ``num`` is (very likely) prime, False if it is
    certainly composite.

    NOTE(review): renamed from an obfuscated identifier — the caller below
    invokes it as ``rabin_miller``. Guards for num < 5 were added: the
    original crashed on num in {2, 3} (empty randrange) and looped forever
    on even num > 3.
    """
    if num < 2:
        return False
    if num in (2, 3):
        return True
    if num % 2 == 0:
        return False

    # Write num - 1 as s * 2**t with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1

    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            # Keep squaring; a witness never reaches num - 1.
            while v != (num - 1):
                if i == t - 1:
                    return False
                i = i + 1
                v = (v * v) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """Return True if ``num`` is prime.

    Screens with all primes below 1000 (exact membership, then trial
    division), and falls back to the probabilistic ``rabin_miller`` test
    for larger candidates.

    NOTE(review): renamed from an obfuscated identifier — callers below use
    ``is_prime_low_num``. The 168-entry hard-coded prime table is replaced
    by an equivalent Sieve of Eratosthenes over [0, 1000).
    """
    if num < 2:
        return False

    # Sieve of Eratosthenes: primes below 1000 (same set as the original
    # hard-coded table).
    sieve = [True] * 1000
    sieve[0] = sieve[1] = False
    for p in range(2, 32):  # 31 >= sqrt(999)
        if sieve[p]:
            for multiple in range(p * p, 1000, p):
                sieve[multiple] = False
    low_primes = [i for i in range(1000) if sieve[i]]

    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random (probable) prime with ``keysize`` bits.

    NOTE(review): renamed from an obfuscated identifier — the __main__
    block below calls ``generate_large_prime``.
    """
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    # NOTE(review): the original stored the generated prime in a throwaway
    # name and then printed an undefined `num`; bind it as `num`.
    num = generate_large_prime()
    print(('Prime number:', num))
    print(('is_prime_low_num:', is_prime_low_num(num)))
| 702 |
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (0.0 .. 1.0).

    NOTE(review): the original had duplicate parameter names (a syntax
    error) and a garbled def name; restored so the ``__main__`` call to
    ``jaro_winkler`` works.

    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    >>> jaro_winkler("hello", "hello")
    1.0
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        # Characters of _str1 that also occur in _str2 within the Jaro
        # matching window; each match in _str2 is blanked so it cannot be
        # reused.
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    # Run the module doctests, then print one sample similarity score.
    import doctest
    doctest.testmod()
    print(jaro_winkler('hello', 'world'))
| 662 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

# NOTE(review): the original bound both statements below to the same
# throwaway identifier; `logger` is required by the config class below, and
# the archive-map name follows the HF convention — TODO confirm exact name.
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class a(PretrainedConfig):
    r"""Configuration class for a DETA-style detection model.

    Stores the backbone config plus encoder/decoder, deformable-attention,
    Hungarian-matcher and loss-coefficient hyperparameters.

    NOTE(review): the original had duplicate keyword names (a syntax
    error) and collapsed every ``self.X = X`` into one throwaway local, so
    no attribute was ever stored; names are restored following the HF
    DETA config layout.
    """

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,  # accepted but unused — TODO confirm upstream name
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict with the backbone config nested as a dict."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 332 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    # No sentencepiece: the slow tokenizer class is unavailable.
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

# NOTE(review): these five module constants were all bound to one throwaway
# name in the original; the tokenizer class below references them by the
# canonical names restored here.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class a(PreTrainedTokenizerFast):
    """Fast CamemBERT tokenizer backed by HuggingFace *tokenizers*.

    NOTE(review): the original had duplicate parameter names (a syntax
    error), one shared name for all five class attributes and for every
    method, and used a single ``token_ids_a`` for both sequences; names
    are restored to the layout `PreTrainedTokenizerFast` expects.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. includes the space
        # before it (lstrip=True) — TODO confirm flags against the slow tokenizer.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """<s> A </s> for one sequence, <s> A </s></s> B </s> for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """CamemBERT does not use token types: always a list of zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model file into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )

        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 332 | 1 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
__lowerCAmelCase : Any = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
__lowerCAmelCase : Any = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
__lowerCAmelCase : str = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
__lowerCAmelCase : List[str] = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
__lowerCAmelCase : str = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE(datasets.Metric):
    """HumanEval-style pass@k metric: executes candidate programs and scores them.

    NOTE(review): method names restored to ``_info``/``_compute`` (required
    by `datasets.Metric`); locals and the `check_correctness` submit target
    were garbled/unbound in the original.
    """

    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string')),
                    'references': datasets.Value('string'),
                }
            ),
            homepage='https://github.com/openai/human-eval',
            codebase_urls=['https://github.com/openai/human-eval'],
            reference_urls=['https://github.com/openai/human-eval'],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Run every candidate against its test and return (pass@k dict, raw results)."""
        # Executing model-generated code is dangerous: require explicit opt-in.
        if os.getenv('HF_ALLOW_CODE_EVAL', 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError('This metric is currently not supported on Windows.')

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()  # per-task counter of submitted candidates
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + '\n' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result['completion_id'], result))

            total, correct = [], []
            for result in results.values():
                result.sort()
                passed = [r[1]['passed'] for r in result]
                total.append(len(passed))
                correct.append(sum(passed))
            total = np.array(total)
            correct = np.array(correct)

            ks = k
            pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimate pass@k of each problem; returns a numpy array of estimates.

    ``num_samples`` may be a single int (shared by all problems) or a
    per-problem sequence aligned with ``num_correct``.

    NOTE(review): renamed from an obfuscated identifier — `_compute` above
    calls it as ``estimate_pass_at_k``.
    """

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k) numerically stably."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
| 709 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import scaffolding: map submodule name -> public symbols.
# NOTE(review): the original bound this dict (and its torch extension, and
# the _LazyModule instance) to throwaway names, so `_import_structure` was
# undefined and the module was never replaced by the lazy proxy.
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling objects as well.
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 158 | 0 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
# NOTE(review): the three module flags below and every field name in the
# dataclass appear mangled by an automated rename: all fields share one
# identifier (so only the last binding survives and, lacking annotations,
# none become dataclass fields), and `A_` in the final `field(...)` call is
# undefined. In the upstream `datasets` Audio feature these are, in order:
# sampling_rate, mono, decode, id, dtype, pa_type and
# _type = field(default="Audio", init=False, repr=False) — TODO confirm.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False, False, False
@dataclass
class lowercase__ :
    # Audio feature: stores audio as {"bytes": ..., "path": ...} structs and
    # decodes to {"array", "path", "sampling_rate"} dicts on access.
    __UpperCAmelCase = None
    __UpperCAmelCase = True
    __UpperCAmelCase = True
    __UpperCAmelCase = None
    # Automatically constructed
    __UpperCAmelCase = "dict"
    __UpperCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
    __UpperCAmelCase = field(default='''Audio''' ,init=A_ ,repr=A_ )
    def __call__( self) -> "pa.StructType":
        """Return the underlying pyarrow storage type of this feature."""
        return self.pa_type
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""") from err
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
return {"bytes": None, "path": value}
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_lowerCamelCase : str = BytesIO()
sf.write(SCREAMING_SNAKE_CASE , value["""array"""] , value["""sampling_rate"""] , format="""wav""")
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""") is not None and os.path.isfile(value["""path"""]):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm"""):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""") is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""")
if value.get("""bytes"""):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
_lowerCamelCase : Dict = np.frombuffer(value["""bytes"""] , dtype=np.intaa).astype(np.floataa) / 3_2767
else:
_lowerCamelCase : Optional[Any] = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""").astype(np.floataa) / 3_2767
_lowerCamelCase : List[Any] = BytesIO(bytes())
sf.write(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , value["""sampling_rate"""] , format="""wav""")
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""")}
elif value.get("""bytes""") is not None or value.get("""path""") is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes"""), "path": value.get("""path""")}
else:
raise ValueError(
F'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.')
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> dict:
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""")
_lowerCamelCase , _lowerCamelCase : Optional[Any] = (value["""path"""], BytesIO(value["""bytes"""])) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(F'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.')
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""") from err
_lowerCamelCase : List[Any] = xsplitext(SCREAMING_SNAKE_CASE)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """)
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """)
if file is None:
_lowerCamelCase : List[Any] = token_per_repo_id or {}
_lowerCamelCase : Dict = path.split("""::""")[-1]
try:
_lowerCamelCase : str = string_to_dict(SCREAMING_SNAKE_CASE , config.HUB_DATASETS_URL)["""repo_id"""]
_lowerCamelCase : Tuple = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_lowerCamelCase : Tuple = None
with xopen(SCREAMING_SNAKE_CASE , """rb""" , use_auth_token=SCREAMING_SNAKE_CASE) as f:
_lowerCamelCase , _lowerCamelCase : Dict = sf.read(SCREAMING_SNAKE_CASE)
else:
_lowerCamelCase , _lowerCamelCase : Tuple = sf.read(SCREAMING_SNAKE_CASE)
_lowerCamelCase : str = array.T
if self.mono:
_lowerCamelCase : Optional[int] = librosa.to_mono(SCREAMING_SNAKE_CASE)
if self.sampling_rate and self.sampling_rate != sampling_rate:
_lowerCamelCase : Optional[Any] = librosa.resample(SCREAMING_SNAKE_CASE , orig_sr=SCREAMING_SNAKE_CASE , target_sr=self.sampling_rate)
_lowerCamelCase : Dict = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def UpperCamelCase_ ( self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""")
return {
"bytes": Value("""binary"""),
"path": Value("""string"""),
}
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> pa.StructArray:
if pa.types.is_string(storage.type):
_lowerCamelCase : Dict = pa.array([None] * len(SCREAMING_SNAKE_CASE) , type=pa.binary())
_lowerCamelCase : Optional[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
_lowerCamelCase : List[Any] = pa.array([None] * len(SCREAMING_SNAKE_CASE) , type=pa.string())
_lowerCamelCase : List[Any] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("""array"""):
_lowerCamelCase : List[Any] = pa.array([Audio().encode_example(SCREAMING_SNAKE_CASE) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index("""bytes""") >= 0:
_lowerCamelCase : Any = storage.field("""bytes""")
else:
_lowerCamelCase : List[Any] = pa.array([None] * len(SCREAMING_SNAKE_CASE) , type=pa.binary())
if storage.type.get_field_index("""path""") >= 0:
_lowerCamelCase : List[Any] = storage.field("""path""")
else:
_lowerCamelCase : List[str] = pa.array([None] * len(SCREAMING_SNAKE_CASE) , type=pa.string())
_lowerCamelCase : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null())
return array_cast(SCREAMING_SNAKE_CASE , self.pa_type)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(SCREAMING_SNAKE_CASE):
with xopen(SCREAMING_SNAKE_CASE , """rb""") as f:
_lowerCamelCase : int = f.read()
return bytes_
_lowerCamelCase : Any = pa.array(
[
(path_to_bytes(x["""path"""]) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
_lowerCamelCase : Tuple = pa.array(
[os.path.basename(SCREAMING_SNAKE_CASE) if path is not None else None for path in storage.field("""path""").to_pylist()] , type=pa.string() , )
_lowerCamelCase : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null())
return array_cast(SCREAMING_SNAKE_CASE , self.pa_type)
| 88 |
"""simple docstring"""
import ast

from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _snake_case():
    """Run the TensorFlow benchmark CLI.

    Parses ``TensorFlowBenchmarkArguments`` from ``sys.argv`` and runs the
    benchmark.  When parsing fails because of deprecated ``--no_*`` flags, a
    message pointing at the new ``--no-*`` spelling is raised instead.

    Fixes in this revision:
    - ``HfArgumentParser(__snake_case)`` referenced an undefined name; the
      parser is built from ``TensorFlowBenchmarkArguments``.
    - the ``__main__`` guard called an undefined ``main()``.
    - ``eval()`` on the argparse error text replaced with
      ``ast.literal_eval`` (no arbitrary code execution).
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # The argparse error ends with the repr of the offending argument
        # list; parse it as a literal instead of eval()-ing it.
        depreciated_args = ast.literal_eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg) from e
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()


if __name__ == "__main__":
    _snake_case()
| 88 | 1 |
'''simple docstring'''
def __lowercase(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number ``2**p - 1``.

    Fixes the obfuscated original whose parameter was named
    ``__SCREAMING_SNAKE_CASE`` while the body read ``p`` (NameError), and
    whose ``__main__`` guard called an undefined ``lucas_lehmer_test``.

    Args:
        p: exponent of the Mersenne number to test; must be >= 2.

    Returns:
        True if ``2**p - 1`` is prime (for prime ``p``), else False.

    Raises:
        ValueError: if ``p`` < 2.
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        # 2**2 - 1 = 3 is prime; the loop below would not run for p == 2.
        return True
    s = 4
    m = (1 << p) - 1
    # Lucas-Lehmer sequence: s(0)=4, s(k+1)=s(k)^2-2 (mod 2**p - 1).
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(__lowercase(7))
    print(__lowercase(11))
| 201 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class lowerCAmelCase_ ( snake_case__ ):
    """Tvlt processor: wraps a Tvlt image processor and a Tvlt feature
    extractor into a single processor for joint image/audio inputs.

    Fixes in this revision:
    - ``__call__`` declared every parameter with the same placeholder name
      (a SyntaxError); parameter names are restored from the body's usage.
    - the three class attributes were all assigned to the same name ``a_``
      (only the last survived); they are restored to the attribute names the
      processor mixin protocol reads.
    - locals collapsed onto one placeholder (``__a``) are given distinct
      names so the output dict is actually assembled from all three results.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
        """Forward ``images`` (and optionally ``images_mixed``) to the image
        processor and ``audio`` to the feature extractor, merging all outputs
        into one dict.

        Raises:
            ValueError: if neither ``images`` nor ``audio`` is given.
        """
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def __a(self):
        # Union of the two sub-processors' expected model input names,
        # de-duplicated while preserving order.
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 201 | 1 |
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def __snake_case() -> None:
    """Parse a raw DPR biencoder dev file into an evaluation-set file (one
    question per line) and a gold-data file (tab-joined positive-context
    titles per line).

    Fixes in this revision:
    - ``type=SCREAMING_SNAKE_CASE_`` referenced an undefined name; the
      argparse types are ``str``.
    - collapsed locals are given distinct names.
    - the ``__main__`` guard called an undefined ``main()``.
    - files are opened with an explicit UTF-8 encoding.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r", encoding="utf-8") as src_file, open(
        args.evaluation_set, "w", encoding="utf-8"
    ) as eval_file, open(args.gold_data_path, "w", encoding="utf-8") as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    __snake_case()
| 51 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
a__ : List[Any] = 'sshleifer/mar_enro_6_3_student'
class lowerCAmelCase__ ( UpperCAmelCase_ ):
    '''Slow, GPU-only end-to-end test: fine-tunes a Marian en-ro student model
    through the `train_mbart_cc25_enro.sh` bash script and validates BLEU and
    generation-time metrics plus the saved Lightning checkpoint.

    NOTE(review): this block is machine-obfuscated -- every local is bound to
    the same placeholder `UpperCAmelCase` while later lines still read the
    original names (`data_cached`, `bash_script`, `env_vars_to_replace`,
    `args`, `model`, ...), and all methods share the name `__snake_case`, so
    only the last definition survives.  Code kept byte-identical; comments
    only.
    '''

    def __snake_case ( self : Dict ):
        # setUp: download and unpack the 40k-train / 0.5k-val / 0.5k-test
        # WMT en-ro subset used by the bash script.
        super().setUp()
        UpperCAmelCase = cached_path(
            '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=a__ , )
        UpperCAmelCase = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def __snake_case ( self : Optional[int] ):
        # Smoke test: the student checkpoint must be loadable from the Hub.
        MarianMTModel.from_pretrained(a__ )

    @slow
    @require_torch_gpu
    def __snake_case ( self : Union[str, Any] ):
        # Placeholder substitutions applied to the training bash script to
        # shrink the run (1 epoch, higher LR, local data dir, Marian student).
        UpperCAmelCase = {
            '''$MAX_LEN''': 64,
            '''$BS''': 64,
            '''$GAS''': 1,
            '''$ENRO_DIR''': self.data_dir,
            '''facebook/mbart-large-cc25''': MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            '''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
            '''--num_train_epochs 6''': '''--num_train_epochs 1''',
        }
        # Clean up bash script
        UpperCAmelCase = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
        UpperCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
        for k, v in env_vars_to_replace.items():
            UpperCAmelCase = bash_script.replace(a__ , str(a__ ) )
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        UpperCAmelCase = f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        UpperCAmelCase = ['''finetune.py'''] + bash_script.split() + args
        with patch.object(a__ , '''argv''' , a__ ):
            # Drive finetune.main() through its CLI entry point.
            UpperCAmelCase = argparse.ArgumentParser()
            UpperCAmelCase = pl.Trainer.add_argparse_args(a__ )
            UpperCAmelCase = SummarizationModule.add_model_specific_args(a__ , os.getcwd() )
            UpperCAmelCase = parser.parse_args()
            UpperCAmelCase = main(a__ )
            # Check metrics
            UpperCAmelCase = load_json(model.metrics_save_path )
            UpperCAmelCase = metrics['''val'''][0]
            UpperCAmelCase = metrics['''val'''][-1]
            self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
            assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , a__ )
            self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
            # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
            self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
            # test learning requirements:
            # 1. BLEU improves over the course of training by more than 2 pts
            self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
            # 2. BLEU finishes above 17
            self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
            # 3. test BLEU and val BLEU within ~1.1 pt.
            self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
            # check lightning ckpt can be loaded and has a reasonable statedict
            UpperCAmelCase = os.listdir(a__ )
            UpperCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0]
            UpperCAmelCase = os.path.join(args.output_dir , a__ )
            UpperCAmelCase = torch.load(a__ , map_location='''cpu''' )
            UpperCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
            assert expected_key in ckpt["state_dict"]
            assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
            # TODO: turn on args.do_predict when PL bug fixed.
            if args.do_predict:
                UpperCAmelCase = {os.path.basename(a__ ) for p in contents}
                assert "test_generations.txt" in contents
                assert "test_results.txt" in contents
                # assert len(metrics["val"]) == desired_n_evals
                assert len(metrics['''test'''] ) == 1
class lowerCAmelCase__ ( UpperCAmelCase_ ):
    '''Slow, GPU-only end-to-end test of no-teacher Marian distillation via
    the `distil_marian_no_teacher.sh` bash script (6 epochs on the small
    bundled wmt_en_ro test data), validating metrics and the checkpoint.

    NOTE(review): machine-obfuscated like the class above -- all locals are
    bound to the placeholder `UpperCAmelCase` while later lines read the
    original names (`data_dir`, `bash_script`, `output_dir`, `epochs`,
    `args`, `model`, ...).  Code kept byte-identical; comments only.
    '''

    @timeout_decorator.timeout(600 )
    @slow
    @require_torch_gpu
    def __snake_case ( self : Any ):
        UpperCAmelCase = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        # Placeholder substitutions shrinking the distillation run.
        UpperCAmelCase = {
            '''--fp16_opt_level=O1''': '''''',
            '''$MAX_LEN''': 128,
            '''$BS''': 16,
            '''$GAS''': 1,
            '''$ENRO_DIR''': data_dir,
            '''$m''': '''sshleifer/student_marian_en_ro_6_1''',
            '''val_check_interval=0.25''': '''val_check_interval=1.0''',
        }
        # Clean up bash script
        UpperCAmelCase = (
            (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
        )
        UpperCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
        UpperCAmelCase = bash_script.replace('''--fp16 ''' , ''' ''' )
        for k, v in env_vars_to_replace.items():
            UpperCAmelCase = bash_script.replace(a__ , str(a__ ) )
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = bash_script.replace('''--fp16''' , '''''' )
        UpperCAmelCase = 6
        UpperCAmelCase = (
            ['''distillation.py''']
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                '''--gpus=1''',
                '''--learning_rate=1e-3''',
                f"--num_train_epochs={epochs}",
                '''--warmup_steps=10''',
                '''--val_check_interval=1.0''',
                '''--do_predict''',
            ]
        )
        with patch.object(a__ , '''argv''' , a__ ):
            # Drive distillation.distill_main() through its CLI entry point.
            UpperCAmelCase = argparse.ArgumentParser()
            UpperCAmelCase = pl.Trainer.add_argparse_args(a__ )
            UpperCAmelCase = SummarizationDistiller.add_model_specific_args(a__ , os.getcwd() )
            UpperCAmelCase = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            UpperCAmelCase = distill_main(a__ )
            # Check metrics
            UpperCAmelCase = load_json(model.metrics_save_path )
            UpperCAmelCase = metrics['''val'''][0]
            UpperCAmelCase = metrics['''val'''][-1]
            assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
            assert last_step_stats["val_avg_gen_time"] >= 0.01
            assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
            assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
            assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , a__ )
            # check lightning ckpt can be loaded and has a reasonable statedict
            UpperCAmelCase = os.listdir(a__ )
            UpperCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0]
            UpperCAmelCase = os.path.join(args.output_dir , a__ )
            UpperCAmelCase = torch.load(a__ , map_location='''cpu''' )
            UpperCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
            assert expected_key in ckpt["state_dict"]
            assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
            # TODO: turn on args.do_predict when PL bug fixed.
            if args.do_predict:
                UpperCAmelCase = {os.path.basename(a__ ) for p in contents}
                assert "test_generations.txt" in contents
                assert "test_results.txt" in contents
                # assert len(metrics["val"]) == desired_n_evals
                assert len(metrics['''test'''] ) == 1
| 51 | 1 |
def lowerCamelCase_(input_str: str) -> str:
    """Return *input_str* with its whitespace-separated words in reverse order.

    Fixes the obfuscated original whose parameter was named ``UpperCAmelCase_``
    while the body read ``input_str`` (NameError at runtime).  Consecutive
    whitespace collapses to single spaces, matching ``str.split()``.

    >>> lowerCamelCase_("hello world")
    'world hello'
    >>> lowerCamelCase_("")
    ''
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 648 |
def lowerCamelCase_(arr: list) -> list:
    """Return all permutations of *arr* (as tuples) using Heap's algorithm.

    Fixes the obfuscated original: the inner helper declared both parameters
    with the same placeholder name (a SyntaxError), the swap targets were
    collapsed onto one placeholder, and the ``__main__`` guard called an
    undefined ``heaps``.

    Args:
        arr: list to permute; it is permuted in place while generating.

    Returns:
        list[tuple]: all ``len(arr)!`` permutations (``[()]`` for empty input).
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        # Iterative Heap's algorithm driven by the counter array c.
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                # Even i swaps with position 0, odd i swaps with position c[i].
                if i % 2 == 0:
                    arr[i], arr[0] = arr[0], arr[i]
                else:
                    arr[i], arr[c[i]] = arr[c[i]], arr[i]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    lowerCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
    lowerCAmelCase__ = [int(item) for item in user_input.split(""",""")]
    print(lowerCamelCase_(lowerCAmelCase__))
| 648 | 1 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
    """Hyper-parameter holder for the Beit image-processor tests; builds the
    kwargs dict used to instantiate a ``BeitImageProcessor``.

    Fixes the obfuscated original, whose ``__init__`` declared every
    parameter with the same placeholder name ``__magic_name__`` (a
    SyntaxError); the names and defaults are restored from the attribute
    assignments in the body.  Mutable list defaults are replaced with the
    ``None`` sentinel idiom (same effective defaults).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_reduce_labels=False,
    ) -> None:
        # Fall back to the dict/list defaults without sharing mutables
        # across instances.
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.do_reduce_labels = do_reduce_labels

    def lowerCamelCase(self) -> Dict:
        """Return the image-processor config dict built from these settings."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def lowerCamelCase_() -> List[Any]:
    """Load a single (image, segmentation map) PIL pair from the ADE20k test
    fixture dataset on the Hub.

    Fixes the obfuscated original, which bound the dataset to a placeholder
    local while reading the undefined name ``dataset``.
    """
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    seg_map = Image.open(dataset[1]["file"])
    return image, seg_map
def lowerCamelCase_() -> List[Any]:
    """Load a batch of two (image, segmentation map) PIL pairs from the
    ADE20k test fixture dataset on the Hub.

    Fixes the obfuscated original, which bound the dataset to a placeholder
    local while reading the undefined name ``ds``.
    """
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image_a = Image.open(ds[0]["file"])
    image_b = Image.open(ds[1]["file"])
    map_a = Image.open(ds[2]["file"])
    map_b = Image.open(ds[3]["file"])
    return [image_a, image_b], [map_a, map_b]
@require_torch
@require_vision
class __lowerCAmelCase ( _a, unittest.TestCase ):
    '''BeitImageProcessor test-suite: properties, dict round-trips, and
    __call__ over PIL / numpy / torch inputs, with and without segmentation
    maps and label reduction.

    NOTE(review): machine-obfuscated -- every local is bound to the
    placeholder `snake_case_` while later lines read the original names
    (`image_processing`, `image_processor`, `encoding`, ...); the mixin base
    `_a`, `BeitImageProcessingTester`, `prepare_semantic_single_inputs` and
    `prepare_semantic_batch_inputs` are also obfuscated away (the fixture
    helpers above are both named `lowerCamelCase_`).  This class also reuses
    the name `__lowerCAmelCase` of the tester class above, shadowing it.
    Code kept byte-identical; comments only.
    '''

    # Processor class under test (None when vision deps are missing).
    lowerCamelCase_ : List[Any] = BeitImageProcessor if is_vision_available() else None

    def lowerCamelCase (self ) -> Optional[Any]:
        '''Set up the shared hyper-parameter tester.'''
        snake_case_ : int = BeitImageProcessingTester(self )

    @property
    def lowerCamelCase (self ) -> str:
        '''Config dict used to instantiate the processor in each test.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCamelCase (self ) -> int:
        '''The processor must expose all expected config attributes.'''
        snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) )
        self.assertTrue(hasattr(__magic_name__ , '''size''' ) )
        self.assertTrue(hasattr(__magic_name__ , '''do_center_crop''' ) )
        self.assertTrue(hasattr(__magic_name__ , '''center_crop''' ) )
        self.assertTrue(hasattr(__magic_name__ , '''do_normalize''' ) )
        self.assertTrue(hasattr(__magic_name__ , '''image_mean''' ) )
        self.assertTrue(hasattr(__magic_name__ , '''image_std''' ) )

    def lowerCamelCase (self ) -> Any:
        '''from_dict must honor both defaults and keyword overrides.'''
        snake_case_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        self.assertEqual(image_processor.do_reduce_labels , __magic_name__ )
        snake_case_ : Union[str, Any] = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__magic_name__ )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
        self.assertEqual(image_processor.do_reduce_labels , __magic_name__ )

    def lowerCamelCase (self ) -> Any:
        '''Intentionally empty placeholder test.'''
        pass

    def lowerCamelCase (self ) -> str:
        '''__call__ on PIL images yields correctly shaped pixel_values.'''
        snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
        for image in image_inputs:
            self.assertIsInstance(__magic_name__ , Image.Image )
        # Test not batched input
        snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        snake_case_ : Any = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def lowerCamelCase (self ) -> Dict:
        '''__call__ on numpy arrays yields correctly shaped pixel_values.'''
        snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
        for image in image_inputs:
            self.assertIsInstance(__magic_name__ , np.ndarray )
        # Test not batched input
        snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        snake_case_ : Optional[int] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def lowerCamelCase (self ) -> Optional[Any]:
        '''__call__ on torch tensors yields correctly shaped pixel_values.'''
        snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
        for image in image_inputs:
            self.assertIsInstance(__magic_name__ , torch.Tensor )
        # Test not batched input
        snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        snake_case_ : List[str] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def lowerCamelCase (self ) -> Optional[Any]:
        '''__call__ with segmentation maps yields pixel_values plus long
        labels in [0, 255], for tensors and PIL, single and batched.'''
        snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
        snake_case_ : Union[str, Any] = []
        for image in image_inputs:
            self.assertIsInstance(__magic_name__ , torch.Tensor )
            maps.append(torch.zeros(image.shape[-2:] ).long() )
        # Test not batched input
        snake_case_ : List[str] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(
            encoding['''labels'''].shape , (
                1,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 255 )
        # Test batched
        snake_case_ : Any = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(
            encoding['''labels'''].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 255 )
        # Test not batched input (PIL images)
        snake_case_ , snake_case_ : Optional[int] = prepare_semantic_single_inputs()
        snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(
            encoding['''labels'''].shape , (
                1,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 255 )
        # Test batched input (PIL images)
        snake_case_ , snake_case_ : Dict = prepare_semantic_batch_inputs()
        snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape , (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(
            encoding['''labels'''].shape , (
                2,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 255 )

    def lowerCamelCase (self ) -> Dict:
        '''Label reduction: labels stay <= 150 without reduction, <= 255
        with reduce_labels enabled (0 is remapped to 255).'''
        snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        snake_case_ , snake_case_ : Tuple = prepare_semantic_single_inputs()
        snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 150 )
        snake_case_ : List[Any] = True
        snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 255 )
| 60 |
'''simple docstring'''
# Full unit name (singular, lowercase) -> SI symbol.
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
# Backward-compat alias: the obfuscated original assigned both dicts to this
# one name, so only the exponent table survived under it.
SCREAMING_SNAKE_CASE = METRIC_CONVERSION


def snake_case_(value: float, from_type: str, to_type: str) -> float:
    """Convert *value* between metric length units (meter .. yottametre).

    Fixes the obfuscated original: all three parameters shared one
    placeholder name (a SyntaxError) and the lookup tables were bound to a
    single clobbered name while the body read ``UNIT_SYMBOL`` and
    ``METRIC_CONVERSION``.

    Args:
        value: quantity expressed in ``from_type`` units.
        from_type: source unit, full name ("kilometer(s)") or symbol ("km").
        to_type: target unit, same accepted forms.

    Returns:
        float: the converted quantity.

    Raises:
        ValueError: if either unit is unknown.
    """
    # Normalize: lowercase, drop a plural 's', then map full names to symbols.
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
    if to_sanitized not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    # Positive exponent scales up, negative scales down.
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 199 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCAmelCase_ = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def lowerCamelCase_(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None) -> Dict:
    """Build the model-input dict for Blenderbot tests, deriving attention
    masks from the pad token where they are not supplied.

    Fixes the obfuscated original: every parameter shared one placeholder
    name (a SyntaxError), and the computed decoder attention mask was
    discarded -- the dict returned ``attention_mask`` under the
    ``"decoder_attention_mask"`` key.

    Args:
        config: object exposing ``pad_token_id``, ``encoder_layers``,
            ``encoder_attention_heads``, ``decoder_layers``,
            ``decoder_attention_heads``.
        input_ids / decoder_input_ids: integer token arrays.
        attention_mask / decoder_attention_mask: optional 0/1 masks; derived
            from the pad token when None.
        head_mask / decoder_head_mask / cross_attn_head_mask: optional head
            masks; filled with ones when None (computed for parity with the
            full signature but not returned).

    Returns:
        dict with input_ids, decoder_input_ids and both attention masks.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class _lowerCAmelCase :
    """Model tester for Flax Blenderbot: builds a tiny config plus inputs and
    checks that cached (use_cache) decoding matches full decoding.

    NOTE(review): identifiers in this file look machine-mangled — methods all
    share the name ``UpperCamelCase_``, parameters collapse to ``UpperCamelCase``
    (duplicate parameter names are a SyntaxError in Python), and every local is
    rebound to ``_snake_case`` while later lines read the intended original
    names.  Broken references are flagged inline rather than silently rewritten.
    """

    def __init__( self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : int=13 , UpperCamelCase : List[Any]=7 , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Optional[Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Tuple=16 , UpperCamelCase : Union[str, Any]=2 , UpperCamelCase : int=4 , UpperCamelCase : List[str]=4 , UpperCamelCase : Optional[int]="gelu" , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : str=0.1 , UpperCamelCase : List[Any]=32 , UpperCamelCase : str=2 , UpperCamelCase : Union[str, Any]=1 , UpperCamelCase : str=0 , UpperCamelCase : List[str]=0.02 , ):
        """Record test hyper-parameters (batch size, model dims, special tokens)."""
        # NOTE(review): the right-hand names below (parent, batch_size, ...) are
        # the intended parameter names; none is actually bound in this scope.
        _snake_case : List[str] = parent
        _snake_case : Optional[int] = batch_size
        _snake_case : List[str] = seq_length
        _snake_case : str = is_training
        _snake_case : Dict = use_labels
        _snake_case : Any = vocab_size
        _snake_case : Dict = hidden_size
        _snake_case : Tuple = num_hidden_layers
        _snake_case : Optional[Any] = num_attention_heads
        _snake_case : Optional[int] = intermediate_size
        _snake_case : Optional[int] = hidden_act
        _snake_case : List[Any] = hidden_dropout_prob
        _snake_case : Dict = attention_probs_dropout_prob
        _snake_case : Optional[int] = max_position_embeddings
        _snake_case : List[str] = eos_token_id
        _snake_case : int = pad_token_id
        _snake_case : Dict = bos_token_id
        _snake_case : Tuple = initializer_range

    def UpperCamelCase_ ( self : Optional[int] ):
        """Build (config, inputs_dict) for a tiny Blenderbot model."""
        _snake_case : Optional[int] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        # Append EOS (id 2) to every row; `np.intaa` is a mangled dtype name (likely np.int64).
        _snake_case : List[str] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
        _snake_case : Any = shift_tokens_right(UpperCamelCase , 1 , 2 )
        _snake_case : List[str] = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCamelCase , )
        _snake_case : Optional[int] = prepare_blenderbot_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase )
        return config, inputs_dict

    def UpperCamelCase_ ( self : int ):
        """Alias of prepare_config_and_inputs used by the *_for_common test hooks."""
        _snake_case : List[Any] = self.prepare_config_and_inputs()
        return config, inputs_dict

    def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : str , UpperCamelCase : Any , UpperCamelCase : List[Any] ):
        """Check cached incremental decoding equals one-shot decoding (no attn mask)."""
        _snake_case : List[str] = 20
        _snake_case : Optional[Any] = model_class_name(UpperCamelCase )
        _snake_case : List[Any] = model.encode(inputs_dict['input_ids'] )
        _snake_case : List[Any] = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        _snake_case : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase )
        _snake_case : Dict = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
        _snake_case : Optional[Any] = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        # Decode all-but-last token with the cache, then the last token alone ...
        _snake_case : List[Any] = model.decode(
            decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , )
        _snake_case : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        _snake_case : Dict = model.decode(
            decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , )
        # ... and compare against decoding the full sequence in one pass.
        _snake_case : str = model.decode(UpperCamelCase , UpperCamelCase )
        _snake_case : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )

    def UpperCamelCase_ ( self : Any , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] ):
        """Same cache check as above, but with an explicit decoder attention mask."""
        _snake_case : Union[str, Any] = 20
        _snake_case : Optional[Any] = model_class_name(UpperCamelCase )
        _snake_case : Optional[Any] = model.encode(inputs_dict['input_ids'] )
        _snake_case : Union[str, Any] = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        # Pad the decoder mask out to max_decoder_length with zeros.
        _snake_case : List[Any] = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        _snake_case : List[Any] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase )
        _snake_case : Optional[Any] = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        _snake_case : int = model.decode(
            decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , )
        _snake_case : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        _snake_case : Tuple = model.decode(
            decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , )
        _snake_case : Optional[Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase )
        _snake_case : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
    """Standalone Blenderbot unit tests: base-model logits shape, LM-head logits
    shape, and the shift_tokens_right helper.

    NOTE(review): identifiers are machine-mangled (class attribute ``a_`` is read
    back as ``self.vocab_size``; locals collapse to ``_snake_case``); unresolved
    references are flagged inline.
    """

    # NOTE(review): read below as `self.vocab_size` — attribute name mangled.
    a_ : Optional[int] =99

    def UpperCamelCase_ ( self : Tuple ):
        """Build a tiny config and a fixed batch of EOS-terminated input ids."""
        _snake_case : Union[str, Any] = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.intaa , )  # NOTE(review): `np.intaa` is a mangled dtype name
        _snake_case : int = input_ids.shape[0]
        _snake_case : Any = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size

    def UpperCamelCase_ ( self : List[Any] ):
        """Conditional-generation model produces (batch, seq, vocab) logits."""
        _snake_case : Optional[int] = self._get_config_and_data()
        _snake_case : int = FlaxBlenderbotForConditionalGeneration(UpperCamelCase )
        _snake_case : Optional[Any] = lm_model(input_ids=UpperCamelCase )
        _snake_case : List[str] = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape , UpperCamelCase )

    def UpperCamelCase_ ( self : int ):
        """LM head logits have vocab-size last dim for explicit decoder inputs."""
        _snake_case : Dict = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        _snake_case : Dict = FlaxBlenderbotForConditionalGeneration(UpperCamelCase )
        _snake_case : Tuple = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
        _snake_case : List[str] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
        _snake_case : str = lm_model(input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase )
        _snake_case : Union[str, Any] = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape , UpperCamelCase )

    def UpperCamelCase_ ( self : Dict ):
        """shift_tokens_right keeps shape, reduces pad count by one, puts BOS first."""
        _snake_case : Tuple = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
        _snake_case : Dict = shift_tokens_right(UpperCamelCase , 1 , 2 )
        _snake_case : Union[str, Any] = np.equal(UpperCamelCase , 1 ).astype(np.floataa ).sum()
        _snake_case : Dict = np.equal(UpperCamelCase , 1 ).astype(np.floataa ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(UpperCamelCase , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase , UpperCAmelCase_ ):
    """Common Flax Blenderbot model tests: cache forward paths, JIT encode/decode,
    from_pretrained smoke test, and a slow 3B generation test.

    NOTE(review): base classes are mangled — ``UpperCAmelCase_`` is undefined;
    presumably FlaxModelTesterMixin / FlaxGenerationTesterMixin (imported above).
    ``FlaxBlenderbotModelTester`` referenced below is also not defined in this
    file under that name — likely the first mangled class above.
    """

    a_ : Tuple =True
    a_ : Optional[int] =(
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    a_ : List[Any] =(FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def UpperCamelCase_ ( self : Tuple ):
        """Create the shared model tester."""
        _snake_case : Tuple = FlaxBlenderbotModelTester(self )

    def UpperCamelCase_ ( self : Dict ):
        """Cached decoding equals full decoding for every model class."""
        _snake_case : str = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase )

    def UpperCamelCase_ ( self : int ):
        """Cached decoding with an explicit attention mask, for every model class."""
        _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase )

    def UpperCamelCase_ ( self : Any ):
        """encode() gives identical shapes with and without jax.jit."""
        _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                _snake_case : Dict = self._prepare_for_class(UpperCamelCase , UpperCamelCase )
                _snake_case : List[Any] = model_class(UpperCamelCase )

                @jax.jit
                def encode_jitted(UpperCamelCase : str , UpperCamelCase : Dict=None , **UpperCamelCase : Union[str, Any] ):
                    return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase )

                with self.subTest('JIT Enabled' ):
                    _snake_case : int = encode_jitted(**UpperCamelCase ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        _snake_case : List[str] = encode_jitted(**UpperCamelCase ).to_tuple()
                self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) )
                for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def UpperCamelCase_ ( self : List[Any] ):
        """decode() gives identical shapes with and without jax.jit."""
        _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                _snake_case : Optional[int] = model_class(UpperCamelCase )
                _snake_case : Optional[int] = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
                _snake_case : Optional[Any] = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : str ):
                    return model.decode(
                        decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , )

                with self.subTest('JIT Enabled' ):
                    _snake_case : List[str] = decode_jitted(**UpperCamelCase ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        _snake_case : int = decode_jitted(**UpperCamelCase ).to_tuple()
                self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) )
                for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def UpperCamelCase_ ( self : List[Any] ):
        """from_pretrained smoke test: forward a single EOS token."""
        for model_class_name in self.all_model_classes:
            _snake_case : Union[str, Any] = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            _snake_case : List[Any] = np.ones((1, 1) ) * model.config.eos_token_id
            _snake_case : Tuple = model(UpperCamelCase )
            self.assertIsNotNone(UpperCamelCase )

    @unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
    @slow
    def UpperCamelCase_ ( self : List[Any] ):
        """3B checkpoint generates the expected reply to 'Sam' (GPU/TPU only)."""
        _snake_case : List[Any] = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
        _snake_case : List[Any] = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
        _snake_case : List[str] = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=UpperCamelCase )
        _snake_case : Optional[int] = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
        _snake_case : Dict = ['Sam']
        _snake_case : Union[str, Any] = tokenizer(UpperCamelCase , return_tensors='jax' )
        _snake_case : Any = model.generate(**UpperCamelCase , **UpperCamelCase )
        _snake_case : Optional[Any] = 'Sam is a great name. It means "sun" in Gaelic.'
        _snake_case : Tuple = tokenizer.batch_decode(UpperCamelCase , **UpperCamelCase )
        assert generated_txt[0].strip() == tgt_text
| 710 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> list:
    """Return the first ``lowerCAmelCase`` Hamming numbers (2^i * 3^j * 5^k).

    Args:
        lowerCAmelCase: how many terms to generate (coerced with int()).

    Returns:
        list of the first n Hamming numbers, starting with 1.

    Raises:
        ValueError: if the requested count is < 1.
    """
    n_element = int(lowerCAmelCase)
    if n_element < 1:
        raise ValueError('a should be a positive number')
    hamming_list = [1]
    # i, j, k index the smallest element whose *2 / *3 / *5 multiple is still unused.
    i, j, k = 0, 0, 0
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list


# Readable alias used by the __main__ guard below.
hamming = lowerCamelCase_
if __name__ == "__main__":
    # Prompt for n, then print the first n Hamming numbers.
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = lowerCamelCase_(int(n))
    print("""-----------------------------------------------------""")
    print(F"""The list with nth numbers is: {hamming_numbers}""")
    print("""-----------------------------------------------------""")
| 669 | 0 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class lowerCAmelCase__ :
    """Model tester for ESMFold: builds a tiny folding config and checks the
    predicted positions/angles tensor shapes.

    NOTE(review): identifiers are machine-mangled — every ``__init__`` parameter
    is ``_lowerCamelCase`` (duplicate parameter names are a SyntaxError) and all
    locals collapse to ``_snake_case`` while later lines read the intended
    original names.  Broken references are flagged inline.
    """

    def __init__( self : Dict , _lowerCamelCase : str , _lowerCamelCase : Union[str, Any]=13 , _lowerCamelCase : Any=7 , _lowerCamelCase : str=False , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Optional[int]=False , _lowerCamelCase : Union[str, Any]=19 , _lowerCamelCase : Optional[int]=32 , _lowerCamelCase : int=5 , _lowerCamelCase : Any=4 , _lowerCamelCase : Dict=37 , _lowerCamelCase : List[Any]="gelu" , _lowerCamelCase : Any=0.1 , _lowerCamelCase : str=0.1 , _lowerCamelCase : int=512 , _lowerCamelCase : List[str]=16 , _lowerCamelCase : int=2 , _lowerCamelCase : Tuple=0.0_2 , _lowerCamelCase : int=3 , _lowerCamelCase : List[Any]=4 , _lowerCamelCase : Optional[int]=None , ):
        """Record test hyper-parameters; right-hand names are the intended params."""
        _snake_case = parent
        _snake_case = batch_size
        _snake_case = seq_length
        _snake_case = is_training
        _snake_case = use_input_mask
        _snake_case = use_token_type_ids
        _snake_case = use_labels
        _snake_case = vocab_size
        _snake_case = hidden_size
        _snake_case = num_hidden_layers
        _snake_case = num_attention_heads
        _snake_case = intermediate_size
        _snake_case = hidden_act
        _snake_case = hidden_dropout_prob
        _snake_case = attention_probs_dropout_prob
        _snake_case = max_position_embeddings
        _snake_case = type_vocab_size
        _snake_case = type_sequence_label_size
        _snake_case = initializer_range
        _snake_case = num_labels
        _snake_case = num_choices
        _snake_case = scope

    def lowercase ( self : Union[str, Any] ):
        """Build input ids, optional mask, and optional label tensors."""
        _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _snake_case = None
        if self.use_input_mask:
            _snake_case = random_attention_mask([self.batch_size, self.seq_length] )
        _snake_case = None
        _snake_case = None
        _snake_case = None
        if self.use_labels:
            _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _snake_case = ids_tensor([self.batch_size] , self.num_choices )
        _snake_case = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowercase ( self : List[Any] ):
        """Tiny EsmConfig in folding mode (2 trunk blocks, fp32 ESM)."""
        _snake_case = EsmConfig(
            vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=_lowerCamelCase , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
        return config

    def lowercase ( self : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Tuple ):
        """Forward the folding model and check positions/angles output shapes."""
        _snake_case = EsmForProteinFolding(config=_lowerCamelCase ).float()
        model.to(_lowerCamelCase )
        model.eval()
        _snake_case = model(_lowerCamelCase , attention_mask=_lowerCamelCase )
        _snake_case = model(_lowerCamelCase )
        _snake_case = model(_lowerCamelCase )
        # 8 structure-module recycles; 14 atom positions, 7 torsion angle pairs.
        self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
        self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )

    def lowercase ( self : Tuple ):
        """Repack prepare_config_and_inputs output into (config, inputs_dict)."""
        _snake_case = self.prepare_config_and_inputs()
        (
            (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) ,
        ) = config_and_inputs
        _snake_case = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ):
    """ESMFold common model tests; most mixin tests are skipped because ESMFold
    does not support the usual model-surgery hooks.

    NOTE(review): base classes are mangled — ``A_`` is undefined; presumably
    ModelTesterMixin / PipelineTesterMixin (imported above).  The class
    attributes below all collapse to ``__a`` and so shadow each other.
    """

    __a = False
    __a = (EsmForProteinFolding,) if is_torch_available() else ()
    __a = ()
    __a = {} if is_torch_available() else {}
    __a = False

    def lowercase ( self : Tuple ):
        """Create model tester + config tester.

        NOTE(review): ``EsmFoldModelTester`` is not defined under that name in
        this file — likely the mangled tester class above.
        """
        _snake_case = EsmFoldModelTester(self )
        _snake_case = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )

    def lowercase ( self : Any ):
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    def lowercase ( self : Optional[int] ):
        """Build inputs and run the forward-shape check."""
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowerCamelCase )

    @unittest.skip('''Does not support attention outputs''' )
    def lowercase ( self : Dict ):
        pass

    @unittest.skip
    def lowercase ( self : Dict ):
        pass

    @unittest.skip('''Esm does not support embedding resizing''' )
    def lowercase ( self : Tuple ):
        pass

    @unittest.skip('''Esm does not support embedding resizing''' )
    def lowercase ( self : int ):
        pass

    @unittest.skip('''ESMFold does not support passing input embeds!''' )
    def lowercase ( self : int ):
        pass

    @unittest.skip('''ESMFold does not support head pruning.''' )
    def lowercase ( self : List[str] ):
        pass

    @unittest.skip('''ESMFold does not support head pruning.''' )
    def lowercase ( self : Tuple ):
        pass

    @unittest.skip('''ESMFold does not support head pruning.''' )
    def lowercase ( self : Any ):
        pass

    @unittest.skip('''ESMFold does not support head pruning.''' )
    def lowercase ( self : Union[str, Any] ):
        pass

    @unittest.skip('''ESMFold does not support head pruning.''' )
    def lowercase ( self : Optional[Any] ):
        pass

    @unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
    def lowercase ( self : Dict ):
        pass

    @unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
    def lowercase ( self : int ):
        pass

    @unittest.skip('''ESMFold only has one output format.''' )
    def lowercase ( self : str ):
        pass

    @unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
    def lowercase ( self : List[str] ):
        pass

    @unittest.skip('''ESMFold does not support input chunking.''' )
    def lowercase ( self : Any ):
        pass

    @unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
    def lowercase ( self : Any ):
        pass

    @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
    def lowercase ( self : Any ):
        pass

    @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
    def lowercase ( self : Dict ):
        pass

    @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
    def lowercase ( self : Union[str, Any] ):
        pass

    @unittest.skip('''ESMFold doesn\'t support data parallel.''' )
    def lowercase ( self : List[Any] ):
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowercase ( self : List[Any] ):
        pass
@require_torch
class lowerCAmelCase__ ( A_ ):
    """Slow ESMFold integration test against the released facebook/esmfold_v1
    checkpoint.

    NOTE(review): base ``A_`` is undefined (mangled; presumably TestCasePlus),
    and the locals below all collapse to ``_snake_case``.
    """

    @slow
    def lowercase ( self : str ):
        """Fold a short sequence and compare the first atom position to a
        reference value (atol 1e-4)."""
        _snake_case = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
        model.eval()
        _snake_case = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        _snake_case = model(_lowerCamelCase )['''positions''']
        # NOTE(review): `torch.floataa` is a mangled dtype name (likely float64).
        _snake_case = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , _lowerCamelCase , atol=1e-4 ) )
| 224 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def _UpperCAmelCase ( src_lang="ro" , tgt_lang="en" , dataset="wmt16" , save_dir=None ) -> None:
    """Download a WMT translation dataset and dump it as seq2seq text files.

    Writes ``{split}.source`` / ``{split}.target`` line-aligned files under
    ``save_dir`` (default: ``"{dataset}-{src_lang}-{tgt_lang}"``); the
    ``validation`` split is saved as ``val.*`` to match summarization datasets.

    Raises:
        ImportError: if the ``datasets`` package is not installed.
    """
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError('''run pip install datasets''' )
    pair = f'''{src_lang}-{tgt_lang}'''
    print(f'''Converting {dataset}-{pair}''' )
    ds = datasets.load_dataset(dataset , pair )
    if save_dir is None:
        save_dir = f'''{dataset}-{pair}'''
    save_dir = Path(save_dir )
    save_dir.mkdir(exist_ok=True )
    for split in ds.keys():
        print(f'''Splitting {split} with {ds[split].num_rows} records''' )
        # to save to val.source, val.target like summary datasets
        fn = '''val''' if split == '''validation''' else split
        src_path = save_dir.joinpath(f'''{fn}.source''' )
        tgt_path = save_dir.joinpath(f'''{fn}.target''' )
        src_fp = src_path.open('''w+''' )
        tgt_fp = tgt_path.open('''w+''' )
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            ex = x['''translation''']
            src_fp.write(ex[src_lang] + '''\n''' )
            tgt_fp.write(ex[tgt_lang] + '''\n''' )
    print(f'''Saved {dataset} dataset to {save_dir}''' )


# Readable alias used by the __main__ guard below.
download_wmt_dataset = _UpperCAmelCase
if __name__ == "__main__":
    # Expose the downloader on the command line via python-fire.
    fire.Fire(_UpperCAmelCase)
| 224 | 1 |
"""simple docstring"""
import math
_lowerCAmelCase = 10
_lowerCAmelCase = 7
_lowerCAmelCase = BALLS_PER_COLOUR * NUM_COLOURS
def __UpperCamelCase ( snake_case__ = 20 ):
A_ : str = math.comb(snake_case__ , snake_case__ )
A_ : Union[str, Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , snake_case__ )
A_ : Dict = NUM_COLOURS * (1 - missing_colour / total)
return F"""{result:.9f}"""
if __name__ == "__main__":
    # Print the expected number of distinct colours for 20 drawn balls.
    print(__UpperCamelCase(20))
| 480 |
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
# Repo root; the script is meant to be run from the root of the repository.
REPO_PATH = "."

if __name__ == "__main__":
    # Validate utils/documentation_tests.txt: every listed path must exist and
    # the file must be kept in alphabetical order.
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 480 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure for the Mask2Former model package: heavy submodules are
# only imported on attribute access (or eagerly under TYPE_CHECKING).
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves _import_structure on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 231 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of output labels for each GLUE fine-tuning task (sts-b is regression,
# hence a single output).  Read by the conversion function below.
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()
def lowercase ( tf_checkpoint_path , xlnet_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    """Convert a TensorFlow XLNet checkpoint into a PyTorch model directory.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        xlnet_config_file: JSON config describing the pre-trained architecture.
        pytorch_dump_folder_path: output folder for weights + config.
        finetuning_task: optional GLUE task name or a "squad" task; selects the
            model head (sequence classification / QA / LM head).
    """
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ''''''
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(F'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'''Save configuration file to {os.path.abspath(pytorch_config_dump_path )}''' )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--xlnet_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained XLNet model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
    )
    parser.add_argument(
        '''--finetuning_task''',
        default=None,
        type=str,
        help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
    )
    args = parser.parse_args()
    print(args)
    # Run the conversion (function named `lowercase` above in this file).
    lowercase(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 231 | 1 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Parse a whitespace-separated edge list ("u v weight" per line) into an
    adjacency dict.

    Args:
        path: text file where each line names an undirected weighted edge.

    Returns:
        dict mapping node -> list of [neighbour, weight] pairs (weights stay
        strings, as read from the file); each edge is recorded in both
        directions.
    """
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour used to seed the tabu search.

    The start node is the first character of the input file.  At each step the
    cheapest unvisited neighbour is taken; the tour is closed back to the start.

    Args:
        path: edge-list file (only its first character, the start node, is read).
        dict_of_neighbours: adjacency dict from generate_neighbours().

    Returns:
        (first_solution, distance_of_first_solution): the tour as a node list
        (start node repeated at the end) and its total integer cost.
    """
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        # Sentinel larger than any real edge weight; also compensated below.
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    # Find the weight of the closing edge (last visited node -> start node).
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    # Replace the final sentinel cost with the real closing-edge weight.
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Build the 2-swap neighbourhood of a tour.

    Every pair of interior nodes is swapped to produce a candidate tour; the
    candidate's total cost is appended as its last element.

    Args:
        solution: tour as a node list (endpoints fixed, interior nodes swapped).
        dict_of_neighbours: adjacency dict from generate_neighbours().

    Returns:
        list of candidate tours ``[n1, ..., nk, cost]``, deduplicated and sorted
        by ascending cost.
    """
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            # Cost of the candidate tour: sum of consecutive edge weights.
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Improve *first_solution* with tabu search.

    :param first_solution: initial tour (list of node labels).
    :param distance_of_first_solution: total distance of the initial tour.
    :param dict_of_neighbours: node -> list of ``[neighbour, distance]`` pairs.
    :param iters: number of tabu-search iterations to run.
    :param size: maximum length of the tabu list (FIFO eviction).
    :return: ``(best_solution_ever, best_cost)``.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1  # cost is the appended last element

        found = False
        while not found:
            # Locate the first position where the candidate differs from the
            # current solution — that identifies the exchanged pair of nodes.
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]  # drop the appended cost
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Move is tabu: fall through to the next-best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    """Entry point: read the graph file named by ``args.File``, build a greedy
    first solution and improve it with tabu search, then print the result.

    :param args: argparse namespace with ``File``, ``Iterations`` and ``Size``.
    """
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours )
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )

    print(f'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
    # Build the CLI; the parsed namespace attribute names (File, Iterations,
    # Size) are what main() reads.
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
| 721 |
'''simple docstring'''
def interpolation_search(sorted_collection, item):
    """Search *item* in an ascending *sorted_collection* by interpolation.

    :param sorted_collection: ascending sequence of comparable numbers.
    :param item: value to locate.
    :return: index of one occurrence of *item*, or ``None`` when absent.
    """
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point

        if point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive interpolation search over ``sorted_collection[left:right + 1]``.

    Pure recursion helper: the initial call should pass ``left=0`` and
    ``right=len(sorted_collection) - 1``.

    :return: index of one occurrence of *item*, or ``None`` when absent.
    """
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    elif sorted_collection[point] > item:
        return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
    else:
        return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __A ( _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
if collection != sorted(_SCREAMING_SNAKE_CASE ):
raise ValueError("Collection must be ascending sorted" )
return True
if __name__ == "__main__":
    import sys

    # Flip to 1 to validate the input sequence before searching.
    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 564 | 0 |
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
# Issues carrying any of these labels are never auto-closed or marked stale
# (compared lowercase against label.name.lower() in main()).
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]
def main():
    """Close or nag stale issues in huggingface/transformers.

    Issues whose last comment is the bot's and that have been inactive for
    more than 7 days (and are at least 30 days old) are closed; issues
    inactive for more than 23 days get a stale-warning comment.  Issues with
    an exempt label are skipped.  Requires a ``GITHUB_TOKEN`` env variable.
    """
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/transformers' )
    open_issues = repo.get_issues(state='open' )

    for issue in open_issues:
        # Newest comment first.
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state='closed' )
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.' )
if __name__ == "__main__":
    main()
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class __lowercase:
    """Embeds the invisible 'dwtDct' watermark into generated image batches."""

    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark('bits' , self.watermark )

    def UpperCamelCase__(self, images):
        """Watermark a batch of images.

        :param images: tensor of shape (N, C, H, W) with values in [-1, 1]
            — assumed from the /2 + 0.5 rescaling below; TODO confirm.
        :return: watermarked tensor with the same shape and value range.
        """
        # can't encode images that are smaller than 256 px wide
        if images.shape[-1] < 256:
            return images

        # [-1, 1] -> [0, 255] uint-range NHWC numpy for the encoder.
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , 'dwtDct' ) for image in images]
        # Back to NCHW torch tensor in [-1, 1].
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
| 101 | 0 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class Node:
    """A binary-search-tree node holding a value plus parent/child links."""

    def __init__(self, value=None):
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value )
        return pformat({f'{self.value}': (self.left, self.right)} , indent=1 )
class BinarySearchTree:
    """An (unbalanced) binary search tree built from ``Node`` objects."""

    def __init__(self, root=None):
        self.root = root

    def __str__(self):
        return str(self.root )

    def __reassign_nodes(self, node, new_children):
        """Splice *new_children* into *node*'s position (used by remove)."""
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node ):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node):
        """Return True when *node* is the right child of its parent."""
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self):
        return self.root is None

    def __insert(self, value):
        """Insert one value by walking from the root down to a free leaf."""
        new_node = Node(value )  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values):
        for value in values:
            self.__insert(value )

    def search(self, value):
        """Return the node holding *value*, or None when absent.

        :raises IndexError: if the tree is empty.
        """
        if self.empty():
            raise IndexError('Warning: Tree is empty! please use another.' )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value != value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node=None):
        """Return the right-most (largest) node of the subtree at *node*."""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node=None):
        """Return the left-most (smallest) node of the subtree at *node*."""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.left is not None:
                node = node.left
        return node

    def remove(self, node_value):
        node = self.search(node_value )  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node , None )
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node , node.right )
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node , node.left )
            else:
                tmp_node = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node):
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )

    def traversal_tree(self, traversal_function=None):
        """Traverse with *traversal_function* (defaults to preorder)."""
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )

    def inorder(self, arr, node):
        """Append the subtree's values to *arr* in ascending (inorder) order."""
        if node:
            self.inorder(arr , node.left )
            arr.append(node.value )
            self.inorder(arr , node.right )

    def find_kth_smallest(self, k, node):
        """Return the k-th smallest value (1-based) of the subtree at *node*."""
        arr = []
        self.inorder(arr , node )  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node):
    """Return the nodes of the subtree rooted at *curr_node* in postorder."""
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
def UpperCAmelCase():
    """Build a small demo tree and exercise insert/search/min/max/remove."""
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )

    # Prints all the elements of the list in order traversal
    print(t )

    if t.search(6 ) is not None:
        print('The value 6 exists' )
    else:
        print('The value 6 doesn\'t exist' )

    if t.search(-1 ) is not None:
        print('The value -1 exists' )
    else:
        print('The value -1 doesn\'t exist' )

    if not t.empty():
        print('Max Value: ' , t.get_max().value )  # type: ignore
        print('Min Value: ' , t.get_min().value )  # type: ignore

    for i in testlist:
        t.remove(i )
        print(t )
# Run this module's doctests with verbose output when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 702 |
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check Project Euler 43's substring-divisibility property.

    *num* is a tuple of the ten digits d1..d10; the property holds when each
    3-digit substring d2d3d4 .. d8d9d10 is divisible by 2, 3, 5, 7, 11, 13, 17
    respectively.
    """
    # d2d3d4 divisible by 2 <=> d4 is even
    if num[3] % 2 != 0:
        return False

    # d3d4d5 divisible by 3 <=> its digit sum is divisible by 3
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    # d4d5d6 divisible by 5 <=> d6 is 0 or 5
    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    """Sum all 0-to-(n-1) pandigital numbers that satisfy the
    substring-divisibility property (Project Euler problem 43)."""
    return sum(
        int("".join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
# NOTE(review): relies on a module-level `solution()` being defined in this
# file — confirm the generalized solver above carries that name.
if __name__ == "__main__":
    print(F'''{solution() = }''')
| 24 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    """Builds tiny ViT configs and random inputs for the TF model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-behaviour test suite for the TF ViT models."""

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
        self.assertIsNotNone(model )
def prepare_img():
    """Load the standard COCO test image used by the integration tests."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the pretrained google/vit-base-patch16-224."""

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )

        # forward pass
        outputs = model(**inputs )

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
| 466 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowerCamelCase : Optional[Any] = 256
class UpperCAmelCase(DiffusionPipeline):
    """Pipeline for unconditional audio generation from note tokens using
    spectrogram diffusion (notes/continuous encoders + T5-film decoder),
    optionally vocoded to waveform with MelGAN."""

    # NOTE(review): attribute name restored per the diffusers optional-component
    # convention — confirm against the released pipeline.
    _optional_components = ['melgan']

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5 )  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly map *features* from [min_value, max_value] to *output_range*."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Inverse of scale_features: map *outputs* back to [min_value, max_value]."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        """Encode note tokens and the previous mel chunk; return both
        (encoding, mask) pairs for the decoder."""
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        """Run the diffusion decoder for one denoising step at *noise_time*."""
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate audio chunk-by-chunk; each chunk is conditioned on the
        previously predicted mel segment.  Returns the MelGAN waveform when
        output_type == "numpy", otherwise the raw mel spectrogram."""
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                f' {type(callback_steps )}.' )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
        full_pred_mel = np.zeros([1, 0, self.n_dims] , np.float32 )
        ones = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )

        for i, encoder_input_tokens in enumerate(input_tokens ):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs , output_range=[-1.0, 1.0] , clip=True )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=encoder_continuous_inputs , continuous_mask=encoder_continuous_mask , )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=generator , device=self.device , dtype=self.decoder.dtype , )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps )

            # Denoising diffusion loop
            for t in self.progress_bar(self.scheduler.timesteps ):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks , input_tokens=x , noise_time=t / self.scheduler.config.num_train_timesteps , )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output , t , x , generator=generator ).prev_sample

            mel = self.scale_to_features(x , input_range=[-1.0, 1.0] )
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , full_pred_mel )

            logger.info("Generated segment" , i )

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output )
| 310 | 0 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class a ( __lowerCamelCase ):
def __init__( self :Tuple ,__lowercase :str ,__lowercase :Any=1_3 ,__lowercase :Optional[Any]=7 ,__lowercase :Union[str, Any]=True ,__lowercase :Union[str, Any]=True ,__lowercase :Tuple=False ,__lowercase :int=True ,__lowercase :str=9_9 ,__lowercase :List[str]=3_2 ,__lowercase :Tuple=5 ,__lowercase :Optional[Any]=4 ,__lowercase :Union[str, Any]=3_7 ,__lowercase :int="gelu" ,__lowercase :int=0.1 ,__lowercase :Union[str, Any]=0.1 ,__lowercase :Dict=5_1_2 ,__lowercase :str=1_6 ,__lowercase :Optional[Any]=2 ,__lowercase :Dict=0.02 ,__lowercase :Union[str, Any]=3 ,__lowercase :List[str]=4 ,__lowercase :Tuple=None ,):
snake_case__ : Optional[int] = parent
snake_case__ : Dict = batch_size
snake_case__ : List[Any] = seq_length
snake_case__ : Any = is_training
snake_case__ : str = use_input_mask
snake_case__ : List[Any] = use_token_type_ids
snake_case__ : List[str] = use_labels
snake_case__ : Any = vocab_size
snake_case__ : Optional[int] = hidden_size
snake_case__ : Optional[Any] = num_hidden_layers
snake_case__ : Union[str, Any] = num_attention_heads
snake_case__ : int = intermediate_size
snake_case__ : Tuple = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : Union[str, Any] = attention_probs_dropout_prob
snake_case__ : List[str] = max_position_embeddings
snake_case__ : str = type_vocab_size
snake_case__ : Any = type_sequence_label_size
snake_case__ : List[Any] = initializer_range
snake_case__ : List[Any] = num_labels
snake_case__ : Tuple = num_choices
snake_case__ : List[Any] = scope
def __lowerCamelCase ( self :Any ):
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case__ : List[Any] = None
if self.use_input_mask:
snake_case__ : int = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Any = None
snake_case__ : Tuple = None
snake_case__ : List[Any] = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case__ : Optional[Any] = ids_tensor([self.batch_size] ,self.num_choices )
snake_case__ : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self :Union[str, Any] ):
return DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,)
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :List[Any] ,__lowercase :Dict ,__lowercase :List[Any] ,__lowercase :Union[str, Any] ,__lowercase :int ,__lowercase :Union[str, Any] ):
snake_case__ : Dict = DistilBertModel(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Optional[Any] = model(__lowercase ,__lowercase )
snake_case__ : Dict = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self :Tuple ,__lowercase :Tuple ,__lowercase :int ,__lowercase :str ,__lowercase :List[Any] ,__lowercase :Tuple ,__lowercase :int ):
snake_case__ : Dict = DistilBertForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : List[Any] = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self :List[Any] ,__lowercase :Optional[int] ,__lowercase :Dict ,__lowercase :Any ,__lowercase :Dict ,__lowercase :Dict ,__lowercase :int ):
snake_case__ : Any = DistilBertForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : int = model(
__lowercase ,attention_mask=__lowercase ,start_positions=__lowercase ,end_positions=__lowercase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowerCamelCase ( self :List[Any] ,__lowercase :str ,__lowercase :int ,__lowercase :int ,__lowercase :List[Any] ,__lowercase :Dict ,__lowercase :str ):
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : Tuple = DistilBertForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Optional[int] = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self :Any ,__lowercase :List[str] ,__lowercase :Optional[int] ,__lowercase :str ,__lowercase :Dict ,__lowercase :Optional[Any] ,__lowercase :Union[str, Any] ):
snake_case__ : List[str] = self.num_labels
snake_case__ : List[Any] = DistilBertForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Any = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self :Tuple ,__lowercase :Dict ,__lowercase :List[str] ,__lowercase :Optional[int] ,__lowercase :Tuple ,__lowercase :Tuple ,__lowercase :Union[str, Any] ):
snake_case__ : str = self.num_choices
snake_case__ : Dict = DistilBertForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
snake_case__ : int = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
snake_case__ : List[str] = model(
__lowercase ,attention_mask=__lowercase ,labels=__lowercase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def __lowerCamelCase ( self :List[str] ):
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
(snake_case__) : List[str] = config_and_inputs
snake_case__ : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model/pipeline test suite for the DistilBERT family of heads.

    NOTE(review): the original base-class names were mangled; ModelTesterMixin and
    PipelineTesterMixin match the `all_model_classes` / `pipeline_model_mapping`
    attributes used below — confirm the corresponding imports exist at file top.
    """

    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the original four boolean flags lost their names; these are the
    # flag names used by the upstream DistilBERT test suite — confirm.
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict['''input_ids'''].to('''cpu'''), inputs_dict['''attention_mask'''].to('''cpu'''))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, '''traced_model.pt'''))
                loaded = torch.jit.load(os.path.join(tmp, '''traced_model.pt'''), map_location=torch_device)
                loaded(inputs_dict['''input_ids'''].to(torch_device), inputs_dict['''attention_mask'''].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test comparing raw hidden states against reference values."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained('''distilbert-base-uncased''')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # Reference slice recorded from a known-good run of the pretrained checkpoint.
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 707 |
def dodecahedron_surface_area(edge) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length.

    Raises:
        ValueError: if ``edge`` is not a positive number.
    """
    # Check the type FIRST: comparing a non-number with 0 would raise TypeError,
    # and the original isinstance(edge, edge) call was invalid.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('''Length must be a positive.''')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge) -> float:
    """Return the volume of a regular dodecahedron with the given edge length.

    Raises:
        ValueError: if ``edge`` is not a positive number.
    """
    # Type check must precede the comparison; the original also shadowed the
    # surface-area function by reusing the same name.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('''Length must be a positive.''')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 219 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    """Map an original GroupViT checkpoint key to the HF Transformers naming scheme.

    Replacements are applied sequentially, each one re-testing the current name,
    so ordering matters (e.g. ``attn`` -> ``self_attn`` must run before the
    ``proj`` -> ``out_proj`` rule that depends on it).
    """
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite an original GroupViT state dict in place to the HF key layout.

    Fused qkv / in_proj matrices are split into separate q/k/v projections; all
    other keys go through :func:`rename_key`.

    NOTE(review): the target key templates below were reconstructed from the
    rename rules ("vision_model.encoder.stages", "text_model.encoder.layers")
    — verify against a real checkpoint before relying on this conversion.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary: projection matrices are stored with a singleton dim
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    """Download and return the standard COCO cats image used to sanity-check the model."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read directly from the raw response body.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """Convert an original GroupViT checkpoint to HF format, verify its logits, and save it.

    Args:
        checkpoint_path: path to the original .pth checkpoint (expects a "model" entry).
        pytorch_dump_folder_path: output directory for the converted model + processor.
        model_name: one of "groupvit-gcc-yfcc" / "groupvit-gcc-redcaps" (selects expected logits).
        push_to_hub: also push model and processor to the Hub when True.

    Raises:
        ValueError: for an unsupported ``model_name``.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    # position_ids is a buffer recreated at init; multi_label_logit_scale is unused by HF.
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(
        text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
    )

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(F'''Model name {model_name} not supported.''')
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        # Fixed default: was "groupvit-gccy-fcc", which the converter's ValueError
        # branch rejects (only the two names in the help text are supported).
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 18 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Return {job_name: html_url} for all jobs of a GitHub Actions workflow run.

    Paginates past the first 100 results; returns {} on any API error.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}

    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # total_count covers all pages; page 1 (100 entries) is already fetched.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + F'''&page={i + 2}''', headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''')

    return {}
def get_artifacts_links(worflow_run_id, token=None):
    """Return {artifact_name: archive_download_url} for a workflow run's artifacts.

    Paginates past the first 100 results; returns {} on any API error.
    (Parameter spelling `worflow_run_id` kept — it is referenced by the URL below.)
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}

    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + F'''&page={i + 2}''', headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''')

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download one workflow artifact to ``output_dir/<artifact_name>.zip``.

    GitHub answers the artifact URL with a redirect; the first request (no
    redirect following) exposes the signed download location in the headers.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, F'''{artifact_name}.zip''')
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract [error_line, error, failed_test, job_link] rows from one CI artifact zip.

    Reads `failures_line.txt` (errors), `summary_short.txt` (failed tests) and
    `job_name.txt` (to resolve a job link via ``job_links``) from the archive.

    Raises:
        ValueError: if the error and failed-test counts disagree (corrupt report).
    """
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            F'''`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` '''
            F'''and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Aggregate error rows from every ``*.zip`` artifact in ``artifact_dir``."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Group log rows by error message, most frequent first.

    Each ``logs`` row is [error_line, error, failed_test, ...]. Returns
    {error: {"count": n, "failed_tests": [(failed_test, error_line), ...]}},
    skipping errors listed in ``error_filter``.
    """
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Return the model name for a ``tests/models/<model>/...::...`` test id, else None."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None
    return model
def reduce_by_model(logs, error_filter=None):
    """Group error counts per model, most affected model first.

    Rows whose test id does not resolve to a model (see :func:`get_model`)
    are dropped. Returns {model: {"count": total, "errors": {error: count}}}.
    """
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the per-error summary (from :func:`reduce_by_error`) as a GitHub markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = F'''| {count} | {error[:100]} | |'''
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    """Render the per-model summary (from :func:`reduce_by_model`) as a GitHub markdown table.

    The "major error" column shows the first (most common) error of each model.
    """
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = F'''| {model} | {count} | {error[:60]} | {_count} |'''
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    error_table = make_github_table(reduced_by_error)
    model_table = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(error_table)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(model_table)
| 18 | 1 |
"""Scrape mobile-app-development job postings from indeed.co.in."""
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup  # fixed: was `from bsa import ...` (typo of bs4)

# Base search URL; the location query value is appended by fetch_jobs.
url = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield (job_title, company_name) pairs scraped from Indeed for ``location``."""
    soup = BeautifulSoup(requests.get(url + location).content, '''html.parser''')
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('''div''', attrs={'''data-tn-component''': '''organicJob'''}):
        job_title = job.find('''a''', attrs={'''data-tn-element''': '''jobTitle'''}).text.strip()
        company_name = job.find('''span''', {'''class''': '''company'''}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
    # Print each scraped (title, company) pair for Bangalore, numbered from 1.
    for i, job in enumerate(fetch_jobs('Bangalore'), 1):
        print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
| 716 |
"""Lazy import structure for the BEiT model (configuration, processors, PyTorch and Flax models)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


# Map of sub-module name -> public symbols, consumed by _LazyModule below.
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Real imports for type checkers only; at runtime the lazy module handles them.
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 377 | 0 |
"""ErnieM model configuration."""
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class __snake_case(PretrainedConfig):
    """Configuration class storing the hyper-parameters of an ErnieM model.

    Instantiating with the defaults yields a configuration similar to
    `susnato/ernie-m-base_pytorch`.
    """

    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 275 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
logging.set_verbosity_info()
def _lowercase(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """
    Copy the weights of an old-structure (XLM)ProphetNet checkpoint into the current
    HuggingFace model structure and save the converted model.

    Fixed: duplicate parameter names (SyntaxError), `output_loading_info` flag, and
    two shape checks that were bare comparisons instead of assertions.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # Attention projections that were fused into a single `in_proj` tensor in the old model.
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new attribute name -> old attribute name
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        # `lm_head` lives on the top-level model; everything else on the base model.
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                # Fall back to the new name when the old model lacks the mapped one.
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
                elif hasattr(old_model, attribute):
                    old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # The fused in_proj tensor holds q/k/v stacked along dim 0.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # Fixed: these were no-op bare comparisons; they must actually assert.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # Descend one level in both module trees.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


# Descriptive, backward-compatible alias; the CLI entry point below calls this name.
convert_prophetnet_checkpoint_to_pytorch = _lowercase
if __name__ == "__main__":
    # Fixed: the parser/args results were bound to a garbled name and then
    # referenced as `parser`/`args`, which were undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    _lowercase(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 344 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. NOTE(review): the same garbled identifier is rebound on the
# next statement, so the logger object is lost — harmless here since nothing
# in view uses it, but the two constants should have distinct names.
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
# Pretrained checkpoint name -> URL of its hosted config.json.
__SCREAMING_SNAKE_CASE ={
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class UpperCamelCase(PretrainedConfig):
    """
    Configuration class for GPT-BigCode models (e.g. santacoder).

    Fixed: the base class was an undefined name (should be `PretrainedConfig`,
    imported at the top of this file), both class attributes were bound to the
    same identifier (the second clobbered the first), and every `__init__`
    parameter shared one name (a duplicate-argument SyntaxError). The restored
    names are the ones the assignment body already referenced.
    """

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ) -> None:
        """Store the hyper-parameters and forward special token ids to the base class."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 477 | """simple docstring"""
import numpy as np
def lowercase__(vector: np.ndarray) -> np.ndarray:
    """
    Return the element-wise logistic sigmoid 1 / (1 + exp(-x)) of `vector`.

    Fixed: the parameter was a garbled name while the body referenced `vector`
    (NameError); also `np.array` is a function, not a type — annotate ndarray.
    """
    return 1 / (1 + np.exp(-vector))
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 477 | 1 |
from maths.prime_factors import prime_factors
def lowerCAmelCase(SCREAMING_SNAKE_CASE_) -> int:
    """
    Liouville lambda function: -1 if the number of prime factors (with
    multiplicity) is odd, 1 if it is even.

    Fixed: `isinstance` was called with the value as its own type argument
    (always a TypeError), and the error message referenced an undefined name.

    Raises:
        TypeError: if the input is not an integer.
        ValueError: if the input is not a positive integer.
    """
    if not isinstance(SCREAMING_SNAKE_CASE_, int):
        UpperCamelCase_ = f"Input value of [number={SCREAMING_SNAKE_CASE_}] must be an integer"
        raise TypeError(UpperCamelCase_)
    if SCREAMING_SNAKE_CASE_ < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(SCREAMING_SNAKE_CASE_)) % 2 else 1
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 628 |
def lowerCAmelCase(bit_count: int) -> list:
    """
    Return the n-bit Gray code sequence as integers.

    Fixed: the parameter was garbled while the body used `bit_count`, the
    intermediate list was never bound to `sequence`, and the helper below had
    been renamed away from `gray_code_sequence_string` (breaking this call).

    Raises:
        ValueError: if `bit_count` is negative.
    """
    if bit_count < 0:
        raise ValueError("The given input must be positive")
    # Generate the bit strings, then convert each to its integer value.
    sequence = gray_code_sequence_string(bit_count)
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """
    Recursively build the n-bit Gray code as a list of bit strings:
    prefix the (n-1)-bit sequence with '0', then its reverse with '1'.
    """
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # 2 ** bit_count entries in the full sequence
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # First half: '0' + each smaller entry, in order.
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # Second half: '1' + each smaller entry, in reverse order.
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 628 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

# Fixed: every constant here was bound to the same garbled identifier; the
# decorated model methods below reference these canonical names.
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Convolution + batch norm + activation block (the basic RegNet unit).

    Fixed: the class was renamed to `a` (in-file references use TFRegNetConvLayer),
    every parameter shared one garbled name (SyntaxError), and the Keras entry
    point must be called `call`.
    """

    def __init__(self, out_channels, kernel_size=3, stride=1, groups=1, activation="relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet stem: validates the channel count and applies the first conv block."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 conv + batch norm used to project the residual branch when the
    channel count or spatial resolution changes."""

    def __init__(self, out_channels, stride=2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs, training=False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-excite: global-pool, bottleneck to `reduced_channels`,
    expand back with a sigmoid gate, and rescale the input."""

    def __init__(self, in_channels, reduced_channels, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet's "X" residual layer: 1x1 conv, grouped 3x3 conv, 1x1 conv,
    with an identity or projected shortcut."""

    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            # Last conv has no activation; it is applied after the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet's "Y" residual layer: an X layer with a squeeze-and-excite block
    inserted before the final 1x1 conv."""

    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage: `depth` stacked X or Y layers, downsampling (stride) in
    the first layer only."""

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stack of RegNet stages; optionally collects every intermediate hidden state."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embedder + encoder + global average pooler; returns NCHW-formatted outputs."""

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for
    downloading and loading pretrained models.

    Fixed: the base class was an undefined garbled name; `TFPreTrainedModel` is
    imported at the top of this file.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        # Serving signature: NCHW float32 images at the canonical 224x224 size.
        # NOTE(review): the garbled original hid the property name; `input_signature`
        # matches the TensorSpec-returning convention — confirm against upstream.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
# Docstring templates consumed by the decorators on the model classes below.
# Fixed: both were bound to the same garbled identifier, leaving the decorator
# arguments unresolved.
REGNET_START_DOCSTRING = R"\n    Parameters:\n    This model is a Tensorflow\n    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n    behavior.\n        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
REGNET_INPUTS_DOCSTRING = R"\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConveNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    """Bare RegNet backbone returning the last hidden state and pooled output."""

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    """RegNet backbone plus a flatten + dense classification head."""

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 720 |
'''simple docstring'''
import qiskit
def UpperCAmelCase_(A=2):
    """
    Build an entangled (GHZ) state over `A` qubits, measure every qubit, and
    return the measurement counts from 1000 simulator shots.

    Fixed: the CNOT target was the qubit *count* (an out-of-range index) instead
    of qubit `i`, and `qiskit.execute` was called with the count twice instead
    of (circuit, backend).
    """
    qubits = A
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, qubits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate: entangle qubit i with its predecessor.
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(qubits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)


# Descriptive, backward-compatible alias; the __main__ guard below calls this name.
quantum_entanglement = UpperCAmelCase_
# Print the shot counts for a 3-qubit GHZ state when run as a script.
if __name__ == "__main__":
    print(f'''Total count for various states are: {quantum_entanglement(3)}''')
| 424 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Lazy-import structure for the CLIP model package.
# Fixed: every optional section rebound one garbled module-level name instead of
# filling `_import_structure`, and the final `_LazyModule` referenced the (then
# undefined) `_import_structure` without installing it in `sys.modules`.
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639 |
a : Any = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def lowercase_ ( _UpperCamelCase ):
'''simple docstring'''
__lowercase = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
a : list[bool | None] = [None] * 10000000
a : int = True
a : Any = False
def lowercase_ ( _UpperCamelCase ):
'''simple docstring'''
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
__lowercase = chain(next_number(_UpperCamelCase ) )
__lowercase = number_chain
while number < 10_00_00_00:
__lowercase = number_chain
number *= 10
return number_chain
def lowercase_ ( _UpperCamelCase = 10_00_00_00 ):
'''simple docstring'''
for i in range(1 , _UpperCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_UpperCamelCase )
# Run doctests, then print the full Project Euler 92 answer (slow: ~10M numbers).
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(f'''{solution() = }''')
| 639 | 1 |
"""simple docstring"""
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """
    Deterministic Miller-Rabin primality test for n < 3,317,044,064,679,887,385,961,981.

    Uses the smallest witness sets proven sufficient for each bound. Above the
    deterministic limit the test is only probabilistic and must be explicitly
    enabled via ``allow_probable``.

    Fixed: the original signature declared the same parameter name twice
    (a SyntaxError) and the body referenced names the garbling had destroyed.

    Raises:
        ValueError: if n exceeds the deterministic bound and ``allow_probable``
            is False.
    """
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3317044064679887385961981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2047,
        1373653,
        25326001,
        3215031751,
        2152302898747,
        3474749660383,
        341550071728321,
        1,
        3825123056546413051,
        1,
        1,
        318665857834031151167461,
        3317044064679887385961981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and remaining odd component:
    # solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and n MUST be composite
        return False
    return True


# Backward-compatible alias for the original public name.
lowercase_ = miller_rabin
def lowercase_ ( ) -> None:
    """Self-test for the Miller-Rabin implementation: each pair of asserts checks
    one deterministic witness-set boundary (a composite just below it, a prime
    just above it); the trailing comment names the bound exercised.

    NOTE(review): this calls ``miller_rabin``, which is not defined in this
    module — the implementation above was renamed to ``lowercase_`` by an
    automated tool, and this very ``def`` shadows that name. Running this
    function as-is raises NameError; restore the original function names first.
    """
    assert not miller_rabin(561 )
    assert miller_rabin(563 )
    # 2047
    assert not miller_rabin(838201 )
    assert miller_rabin(838207 )
    # 1_373_653
    assert not miller_rabin(17316001 )
    assert miller_rabin(17316017 )
    # 25_326_001
    assert not miller_rabin(3078386641 )
    assert miller_rabin(3078386653 )
    # 3_215_031_751
    assert not miller_rabin(1713045574801 )
    assert miller_rabin(1713045574819 )
    # 2_152_302_898_747
    assert not miller_rabin(2779799728307 )
    assert miller_rabin(2779799728327 )
    # 3_474_749_660_383
    assert not miller_rabin(113850023909441 )
    assert miller_rabin(113850023909527 )
    # 341_550_071_728_321
    assert not miller_rabin(1275041018848804351 )
    assert miller_rabin(1275041018848804391 )
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79666464458507787791867 )
    assert miller_rabin(79666464458507787791951 )
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552840677446647897660333 )
    assert miller_rabin(552840677446647897660359 )
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin() | 366 | """simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
__A = (720, 1280) # Height, Width
__A = (0.4, 0.6) # if height or width lower than this scale, drop it.
__A = 1 / 100
__A = ''''''
__A = ''''''
__A = ''''''
__A = 250
def lowercase_ ( ) -> None:
    """Entry point of the mosaic-augmentation script: stitch 4 random dataset
    images into one mosaic, write the JPEG, and emit YOLO-format annotations.

    NOTE(review): this body is garbled by an automated rename — it passes the
    undefined name ``_lowerCamelCase`` to ``get_dataset`` / ``update_image_and_anno``
    and reads ``path``, ``new_annos``, ``letter_code``, ``file_name``,
    ``file_root``, ``annos_list``, ``width``, ``height``, ``x_center``,
    ``y_center``, ``OUTPUT_DIR`` and ``NUMBER_IMAGES``, none of which are ever
    bound here (every assignment targets the single collapsed variable).
    Compare with the upstream mosaic_augmentation script before running.
    """
    __lowerCamelCase , __lowerCamelCase : List[Any] = get_dataset(_lowerCamelCase , _lowerCamelCase )
    for index in range(_lowerCamelCase ):
        # pick 4 distinct source images for the 2x2 mosaic
        __lowerCamelCase : Optional[Any] = random.sample(range(len(_lowerCamelCase ) ) , 4 )
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = update_image_and_anno(
            _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , filter_scale=_lowerCamelCase , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        __lowerCamelCase : Tuple = random_chars(32 )
        __lowerCamelCase : Dict = path.split(os.sep )[-1].rsplit("." , 1 )[0]
        __lowerCamelCase : List[str] = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
        cva.imwrite(F"""{file_root}.jpg""" , _lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
        __lowerCamelCase : List[Any] = []
        # convert corner-format boxes back to YOLO center/size format
        for anno in new_annos:
            __lowerCamelCase : Any = anno[3] - anno[1]
            __lowerCamelCase : Optional[int] = anno[4] - anno[2]
            __lowerCamelCase : Optional[int] = anno[1] + width / 2
            __lowerCamelCase : Union[str, Any] = anno[2] + height / 2
            __lowerCamelCase : int = F"""{anno[0]} {x_center} {y_center} {width} {height}"""
            annos_list.append(_lowerCamelCase )
        with open(F"""{file_root}.txt""" , "w" ) as outfile:
            outfile.write("\n".join(line for line in annos_list ) )
def lowercase_ ( label_dir: str , img_dir: str ) -> tuple[list, list]:
    """Collect image paths and bounding boxes from a YOLO-format dataset.

    Bug fix: the original declared both parameters with the same name (a
    SyntaxError) and collapsed every local into one variable, so ``img_paths``,
    ``labels``, ``boxes``, ``obj_lists`` and ``obj`` were never bound. This
    restores distinct names with the intended behavior.

    Args:
        label_dir: directory containing ``*.txt`` label files, one line per
            object: ``class x_center y_center width height`` (all normalized).
        img_dir: directory containing the matching ``<name>.jpg`` images.

    Returns:
        ``(img_paths, labels)`` where each label entry is a list of
        ``[class_id, xmin, ymin, xmax, ymax]`` corner-format boxes. Label files
        with no objects are skipped entirely.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , "*.txt" ) ):
        label_name = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f"""{label_name}.jpg""" )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n" ).split(" " )
            # convert center/size format to corner coordinates
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def lowercase_ ( all_img_list: list , all_annos: list , idxs: list[int] , output_size: tuple[int, int] , scale_range: tuple[float, float] , filter_scale: float = 0.0 , ) -> tuple[list, list, str]:
    """Build one 2x2 mosaic image from four source images and remap their boxes.

    Bug fix: the original declared six parameters all with the same name (a
    SyntaxError), collapsed every local into one variable, and lost the slice
    targets of the canvas assignments. This restores the intended behavior.

    Args:
        all_img_list: list of image file paths.
        all_annos: per-image lists of ``[class_id, xmin, ymin, xmax, ymax]``
            boxes in normalized [0, 1] corner coordinates.
        idxs: four indices into ``all_img_list`` (top-left, top-right,
            bottom-left, bottom-right).
        output_size: ``(height, width)`` of the mosaic canvas.
        scale_range: range from which the split point is sampled.
        filter_scale: drop remapped boxes whose width or height falls at or
            below this value (0.0 keeps everything).

    Returns:
        ``(output_img, new_anno, path)`` — the mosaic array, the remapped
        boxes, and the path of the first source image.
    """
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    # random split point of the canvas, as fractions of width/height
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cva.imread(path )
        if i == 0:  # top-left
            img = cva.resize(img , (divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            img = cva.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            img = cva.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            img = cva.resize(
                img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def lowercase_ ( number_char: int ) -> str:
    """Generate a random string of lowercase letters and digits.

    Bug fix: the original collapsed ``letter_code`` into an unrelated variable
    and then sampled ``random.choice`` over the *integer argument*, which raises
    TypeError. This restores the intended sampling over the alphabet.

    Args:
        number_char: desired length of the string; must be greater than 1.

    Returns:
        A string of ``number_char`` characters drawn from [a-z0-9].
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print('''DONE ✅''') | 366 | 1 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _SCREAMING_SNAKE_CASE:
    # Model-tester helper for Swinv2: builds configs and random pixel inputs and
    # runs shape checks for SwinvaModel, SwinvaForMaskedImageModeling and
    # SwinvaForImageClassification.
    #
    # NOTE(review): every attribute assignment below binds the single collapsed
    # module-level name `SCREAMING_SNAKE_CASE__` instead of `self.<attr>` (an
    # automated-rename artifact), so `self.batch_size` etc. read later are never
    # set. Code kept byte-identical; compare with the upstream
    # tests/models/swinv2/test_modeling_swinv2.py before running.
    def __init__( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any]=13 , UpperCamelCase_ : Optional[int]=32 , UpperCamelCase_ : str=2 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : Any=16 , UpperCamelCase_ : Union[str, Any]=[1, 2, 1] , UpperCamelCase_ : Any=[2, 2, 4] , UpperCamelCase_ : List[str]=2 , UpperCamelCase_ : Optional[int]=2.0 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[Any]=0.0 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : Dict=False , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Dict=0.02 , UpperCamelCase_ : List[Any]=1e-5 , UpperCamelCase_ : int=True , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[Any]=8 , ) -> Optional[Any]:
        # presumably each line was `self.<name> = <param>` before the rename
        SCREAMING_SNAKE_CASE__ :List[str] = parent
        SCREAMING_SNAKE_CASE__ :Tuple = batch_size
        SCREAMING_SNAKE_CASE__ :Dict = image_size
        SCREAMING_SNAKE_CASE__ :List[Any] = patch_size
        SCREAMING_SNAKE_CASE__ :int = num_channels
        SCREAMING_SNAKE_CASE__ :str = embed_dim
        SCREAMING_SNAKE_CASE__ :List[Any] = depths
        SCREAMING_SNAKE_CASE__ :Tuple = num_heads
        SCREAMING_SNAKE_CASE__ :Union[str, Any] = window_size
        SCREAMING_SNAKE_CASE__ :str = mlp_ratio
        SCREAMING_SNAKE_CASE__ :Any = qkv_bias
        SCREAMING_SNAKE_CASE__ :Tuple = hidden_dropout_prob
        SCREAMING_SNAKE_CASE__ :str = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE__ :Dict = drop_path_rate
        SCREAMING_SNAKE_CASE__ :Dict = hidden_act
        SCREAMING_SNAKE_CASE__ :Any = use_absolute_embeddings
        SCREAMING_SNAKE_CASE__ :Dict = patch_norm
        SCREAMING_SNAKE_CASE__ :int = layer_norm_eps
        SCREAMING_SNAKE_CASE__ :str = initializer_range
        SCREAMING_SNAKE_CASE__ :Optional[int] = is_training
        SCREAMING_SNAKE_CASE__ :List[Any] = scope
        SCREAMING_SNAKE_CASE__ :Optional[int] = use_labels
        SCREAMING_SNAKE_CASE__ :int = type_sequence_label_size
        SCREAMING_SNAKE_CASE__ :str = encoder_stride
    # prepare_config_and_inputs: random pixel values plus optional labels
    def __lowerCamelCase ( self : Union[str, Any] ) -> Any:
        SCREAMING_SNAKE_CASE__ :List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        SCREAMING_SNAKE_CASE__ :Optional[Any] = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE__ :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        SCREAMING_SNAKE_CASE__ :Optional[int] = self.get_config()
        return config, pixel_values, labels
    # get_config: build a SwinvaConfig from the tester's attributes
    def __lowerCamelCase ( self : Any ) -> Optional[int]:
        return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    # create_and_check_model: forward pass and last-hidden-state shape check
    def __lowerCamelCase ( self : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int ) -> str:
        SCREAMING_SNAKE_CASE__ :Union[str, Any] = SwinvaModel(config=UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        SCREAMING_SNAKE_CASE__ :Dict = model(UpperCamelCase_ )
        # sequence shrinks by 4x per stage; hidden dim doubles per stage
        SCREAMING_SNAKE_CASE__ :Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        SCREAMING_SNAKE_CASE__ :str = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    # create_and_check_for_masked_image_modeling: RGB plus greyscale variants
    def __lowerCamelCase ( self : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple ) -> Tuple:
        SCREAMING_SNAKE_CASE__ :Optional[Any] = SwinvaForMaskedImageModeling(config=UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        SCREAMING_SNAKE_CASE__ :List[Any] = model(UpperCamelCase_ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        SCREAMING_SNAKE_CASE__ :Optional[Any] = 1
        SCREAMING_SNAKE_CASE__ :Any = SwinvaForMaskedImageModeling(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        SCREAMING_SNAKE_CASE__ :Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        SCREAMING_SNAKE_CASE__ :List[str] = model(UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    # create_and_check_for_image_classification: logits shape check
    def __lowerCamelCase ( self : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple ) -> List[str]:
        SCREAMING_SNAKE_CASE__ :Dict = self.type_sequence_label_size
        SCREAMING_SNAKE_CASE__ :List[str] = SwinvaForImageClassification(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        SCREAMING_SNAKE_CASE__ :Dict = model(UpperCamelCase_ , labels=UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    # prepare_config_and_inputs_for_common: repackage inputs as a kwargs dict
    def __lowerCamelCase ( self : Optional[Any] ) -> List[str]:
        SCREAMING_SNAKE_CASE__ :Dict = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Union[str, Any] = config_and_inputs
        SCREAMING_SNAKE_CASE__ :Optional[Any] = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    # Swinv2 model test suite (ModelTesterMixin + PipelineTesterMixin style).
    #
    # NOTE(review): an automated rename collapsed the base-class names so this
    # class literally inherits from itself twice, every method is named
    # `__lowerCamelCase` (later defs shadow earlier ones), and locals/attributes
    # all target the single name `SCREAMING_SNAKE_CASE__`. Code kept
    # byte-identical; compare with the upstream
    # tests/models/swinv2/test_modeling_swinv2.py before running.
    A_ : List[str] = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    A_ : Dict = (
        {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    A_ : Optional[int] = False
    A_ : List[Any] = False
    A_ : Optional[Any] = False
    A_ : List[str] = False
    # setUp: build the model tester and config tester
    def __lowerCamelCase ( self : List[Any] ) -> Any:
        SCREAMING_SNAKE_CASE__ :Any = SwinvaModelTester(self )
        SCREAMING_SNAKE_CASE__ :List[Any] = ConfigTester(self , config_class=UpperCamelCase_ , embed_dim=37 )
    # test_config: run the standard config round-trip checks
    def __lowerCamelCase ( self : Tuple ) -> Union[str, Any]:
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    # test_model: basic forward-pass shape check
    def __lowerCamelCase ( self : List[str] ) -> Optional[int]:
        SCREAMING_SNAKE_CASE__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase_ )
    @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
    def __lowerCamelCase ( self : List[str] ) -> List[str]:
        pass
    @unittest.skip(reason='Swinv2 does not use inputs_embeds' )
    def __lowerCamelCase ( self : List[Any] ) -> Union[str, Any]:
        pass
    # test_model_common_attributes: embeddings exist, output embeddings optional
    def __lowerCamelCase ( self : List[Any] ) -> List[str]:
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE__ :Any = model_class(UpperCamelCase_ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            SCREAMING_SNAKE_CASE__ :int = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
    # test_forward_signature: first forward arg must be pixel_values
    def __lowerCamelCase ( self : List[Any] ) -> str:
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE__ :List[str] = model_class(UpperCamelCase_ )
            SCREAMING_SNAKE_CASE__ :int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE__ :Dict = [*signature.parameters.keys()]
            SCREAMING_SNAKE_CASE__ :List[Any] = ['pixel_values']
            self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
    # test_attention_outputs: attention tensors shape and count, via kwarg and config
    def __lowerCamelCase ( self : Optional[int] ) -> int:
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE__ :str = True
            SCREAMING_SNAKE_CASE__ :Dict = False
            SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
            SCREAMING_SNAKE_CASE__ :List[str] = model_class(UpperCamelCase_ )
            model.to(UpperCamelCase_ )
            model.eval()
            with torch.no_grad():
                SCREAMING_SNAKE_CASE__ :Tuple = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
            SCREAMING_SNAKE_CASE__ :str = outputs.attentions
            SCREAMING_SNAKE_CASE__ :List[Any] = len(self.model_tester.depths )
            self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            SCREAMING_SNAKE_CASE__ :Dict = True
            SCREAMING_SNAKE_CASE__ :Any = config.window_size**2
            SCREAMING_SNAKE_CASE__ :Dict = model_class(UpperCamelCase_ )
            model.to(UpperCamelCase_ )
            model.eval()
            with torch.no_grad():
                SCREAMING_SNAKE_CASE__ :Tuple = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
            SCREAMING_SNAKE_CASE__ :Dict = outputs.attentions
            self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            SCREAMING_SNAKE_CASE__ :Union[str, Any] = len(UpperCamelCase_ )
            # Check attention is always last and order is fine
            SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
            SCREAMING_SNAKE_CASE__ :List[str] = True
            SCREAMING_SNAKE_CASE__ :Optional[Any] = model_class(UpperCamelCase_ )
            model.to(UpperCamelCase_ )
            model.eval()
            with torch.no_grad():
                SCREAMING_SNAKE_CASE__ :List[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
            if hasattr(self.model_tester , 'num_hidden_states_types' ):
                SCREAMING_SNAKE_CASE__ :Dict = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                SCREAMING_SNAKE_CASE__ :Union[str, Any] = 2
            self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase_ ) )
            SCREAMING_SNAKE_CASE__ :Union[str, Any] = outputs.attentions
            self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    # check_hidden_states_output: hidden_states and reshaped_hidden_states shapes
    def __lowerCamelCase ( self : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] ) -> Tuple:
        SCREAMING_SNAKE_CASE__ :Dict = model_class(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        with torch.no_grad():
            SCREAMING_SNAKE_CASE__ :Optional[int] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
        SCREAMING_SNAKE_CASE__ :Tuple = outputs.hidden_states
        SCREAMING_SNAKE_CASE__ :List[str] = getattr(
            self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
        # Swinv2 has a different seq_length
        SCREAMING_SNAKE_CASE__ :Tuple = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        SCREAMING_SNAKE_CASE__ :Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        SCREAMING_SNAKE_CASE__ :List[Any] = outputs.reshaped_hidden_states
        self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :List[Any] = reshaped_hidden_states[0].shape
        SCREAMING_SNAKE_CASE__ :Union[str, Any] = (
            reshaped_hidden_states[0].view(UpperCamelCase_ , UpperCamelCase_ , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    # test_hidden_states_output: run the shape checks via kwarg and via config
    def __lowerCamelCase ( self : Any ) -> Dict:
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE__ :Optional[Any] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE__ :List[Any] = True
            self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            SCREAMING_SNAKE_CASE__ :Optional[Any] = True
            self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
    # test_hidden_states_output_with_padding: image size not divisible by patch size
    def __lowerCamelCase ( self : Optional[Any] ) -> Optional[int]:
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE__ :Tuple = 3
        SCREAMING_SNAKE_CASE__ :Optional[int] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        SCREAMING_SNAKE_CASE__ :Dict = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        SCREAMING_SNAKE_CASE__ :Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        SCREAMING_SNAKE_CASE__ :List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE__ :Any = True
            self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            SCREAMING_SNAKE_CASE__ :Tuple = True
            self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , (padded_height, padded_width) )
    # test_for_masked_image_modeling
    def __lowerCamelCase ( self : Tuple ) -> Optional[int]:
        SCREAMING_SNAKE_CASE__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase_ )
    # test_for_image_classification
    def __lowerCamelCase ( self : Optional[Any] ) -> Dict:
        SCREAMING_SNAKE_CASE__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
    # test_model_from_pretrained: load the first published checkpoint
    @slow
    def __lowerCamelCase ( self : Any ) -> List[str]:
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE__ :Optional[Any] = SwinvaModel.from_pretrained(UpperCamelCase_ )
            self.assertIsNotNone(UpperCamelCase_ )
    # test_initialization: parameters initialize to 0/1 with zeroed config
    def __lowerCamelCase ( self : Union[str, Any] ) -> int:
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE__ :List[Any] = _config_zero_init(UpperCamelCase_ )
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE__ :Dict = model_class(config=UpperCamelCase_ )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
    # Slow integration test: run the published swinv2-tiny checkpoint on the COCO
    # cats fixture image and pin the first three logits.
    #
    # NOTE(review): rename artifacts as elsewhere in this file — locals collapse
    # into `SCREAMING_SNAKE_CASE__` and `.to(UpperCamelCase_)` references an
    # undefined name (presumably `torch_device`). Code kept byte-identical.
    @cached_property
    def __lowerCamelCase ( self : Dict ) -> Any:
        # default_image_processor for the checkpoint (None when vision is absent)
        return (
            AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
            if is_vision_available()
            else None
        )
    @slow
    def __lowerCamelCase ( self : Tuple ) -> Any:
        SCREAMING_SNAKE_CASE__ :Tuple = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
            UpperCamelCase_ )
        SCREAMING_SNAKE_CASE__ :Optional[int] = self.default_image_processor
        SCREAMING_SNAKE_CASE__ :int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        SCREAMING_SNAKE_CASE__ :List[Any] = image_processor(images=UpperCamelCase_ , return_tensors='pt' ).to(UpperCamelCase_ )
        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE__ :List[Any] = model(**UpperCamelCase_ )
        # verify the logits
        SCREAMING_SNAKE_CASE__ :Optional[int] = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
        SCREAMING_SNAKE_CASE__ :Union[str, Any] = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(UpperCamelCase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4 ) )
| 209 | '''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class _SCREAMING_SNAKE_CASE:
def __init__( self : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : int=True , UpperCamelCase_ : List[Any]=99 , UpperCamelCase_ : List[str]=64 , UpperCamelCase_ : List[str]=32 , UpperCamelCase_ : Union[str, Any]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : List[str]=37 , UpperCamelCase_ : str="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Optional[Any]=5_12 , UpperCamelCase_ : Union[str, Any]=16 , UpperCamelCase_ : Union[str, Any]=2 , UpperCamelCase_ : Optional[int]=0.02 , UpperCamelCase_ : Optional[Any]=3 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : int=None , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ :Optional[int] = parent
SCREAMING_SNAKE_CASE__ :Any = batch_size
SCREAMING_SNAKE_CASE__ :Tuple = seq_length
SCREAMING_SNAKE_CASE__ :Any = is_training
SCREAMING_SNAKE_CASE__ :int = use_input_mask
SCREAMING_SNAKE_CASE__ :Dict = use_token_type_ids
SCREAMING_SNAKE_CASE__ :str = use_labels
SCREAMING_SNAKE_CASE__ :str = vocab_size
SCREAMING_SNAKE_CASE__ :Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ :List[Any] = embedding_size
SCREAMING_SNAKE_CASE__ :int = num_hidden_layers
SCREAMING_SNAKE_CASE__ :int = num_attention_heads
SCREAMING_SNAKE_CASE__ :Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ :str = hidden_act
SCREAMING_SNAKE_CASE__ :Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ :Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ :Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE__ :Optional[int] = type_vocab_size
SCREAMING_SNAKE_CASE__ :Tuple = type_sequence_label_size
SCREAMING_SNAKE_CASE__ :Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ :int = num_labels
SCREAMING_SNAKE_CASE__ :Optional[Any] = num_choices
SCREAMING_SNAKE_CASE__ :List[Any] = scope
def __lowerCamelCase ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ :List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ :str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ :Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ :List[str] = None
SCREAMING_SNAKE_CASE__ :int = None
SCREAMING_SNAKE_CASE__ :Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ :Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ :Any = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ :str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self : List[str] ) -> Any:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : int ) -> int:
SCREAMING_SNAKE_CASE__ :Optional[int] = MegatronBertModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :int = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :str = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[str] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ :str = MegatronBertForMaskedLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> Dict:
SCREAMING_SNAKE_CASE__ :Optional[int] = MegatronBertForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ :Optional[int] = MegatronBertForNextSentencePrediction(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :List[str] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE__ :Tuple = MegatronBertForPreTraining(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :Union[str, Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , next_sentence_label=UpperCamelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ :int = MegatronBertForQuestionAnswering(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :Tuple = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ :List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ :Optional[int] = MegatronBertForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :int = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ :Dict = self.num_labels
SCREAMING_SNAKE_CASE__ :Any = MegatronBertForTokenClassification(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self : str , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ :Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE__ :Tuple = MegatronBertForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ :Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ :List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ :Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ :Union[str, Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ :Dict = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) :Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE__ :Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API test suite for the MegatronBERT model family.

    NOTE(review): the original base list was a garbled self-reference (NameError at
    class creation); ModelTesterMixin/PipelineTesterMixin match the upstream test —
    confirm both are imported at the top of this file. Attribute and method names
    are restored so the mixins and unittest discovery actually pick them up.
    """

    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy label tensors for pretraining-style models when labels are requested."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # NOTE(review): original mapping name was lost in mangling; the labels added
            # (MLM labels + next_sentence_label) match the pretraining mapping — confirm
            # MODEL_FOR_PRETRAINING_MAPPING is imported at file top.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_megatron_bert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_megatron_bert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_megatron_bert_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_megatron_bert_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_megatron_bert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_megatron_bert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_megatron_bert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Wrap a (nested) list of token ids in a ``torch.long`` tensor.

    Renamed from the mangled ``lowerCamelCase``: the integration test below calls
    it as ``_long_tensor``. The original also passed the token list itself as the
    ``device`` argument (a crash); the device argument is dropped here.
    NOTE(review): upstream used ``device=torch_device`` — restore if that name is
    imported at file top.
    """
    return torch.tensor(tok_lst, dtype=torch.long)


# Numeric tolerance for the integration checks; name kept because the test class
# below reads this module-level constant as its rel/abs tolerance.
UpperCamelCase_ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    """Slow integration check against the released Megatron-BERT-345M checkpoint.

    Renamed: the original class reused the name of the common test class above,
    shadowing it at module scope so that suite never ran.
    """

    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        # The mangled original stored the checkpoint path in a dead local and then
        # passed the module-level float constant to from_pretrained; restored.
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)  # NOTE(review): assumes `torch_device` is imported at file top — confirm
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                # UpperCamelCase_ is the module-level 1e-4 tolerance defined above.
                self.assertTrue(math.isclose(a, b, rel_tol=UpperCamelCase_, abs_tol=UpperCamelCase_), msg=msg)
| 209 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test suite for ProphetNet (BERT-style WordPiece vocabulary).

    Restored: the original base ``lowercase__`` was undefined (TokenizerTesterMixin
    is imported above), class attributes shared one mangled name, every method was
    named ``a__`` (so later defs clobbered earlier and unittest discovered none),
    and several locals (``self.vocab_file``, ``text``/``text_2``) were never bound.
    """

    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Minimal WordPiece vocab written to a temp file; token ids are list positions.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        # Lowercasing strips accents by default.
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        # ProphetNet appends a single [SEP] (id 102) after each sequence.
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
'''simple docstring'''
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test suite for Transfo-XL (word-level, Moses-style tokenization).

    Restored: the original base ``lowercase__`` was undefined (TokenizerTesterMixin
    is imported above), class attributes shared one mangled name, every method was
    named ``a__`` (clobbering each other), and locals such as ``self.vocab_file``
    and ``original_len`` were never bound.
    """

    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Word-level vocab written to a temp file; token ids are list positions.
        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        # Moses-style detokenization markers: @-@, @,@ and @.@ mark joined punctuation.
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        # Round-trip: detokenizing the markers must reproduce the original text.
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class _A(ProcessorMixin):
    """Wav2Vec2 processor: wraps a feature extractor (audio) and a CTC tokenizer (text).

    Restored: the original base ``UpperCAmelCase_`` was undefined (ProcessorMixin is
    imported above), both class attributes and all six methods shared one mangled
    name (each definition clobbered the previous), and ``a_`` was referenced but
    never defined. Names follow the canonical Wav2Vec2Processor contract.
    """

    # Attribute names required by ProcessorMixin.
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load via ProcessorMixin; fall back to loading the two components directly
        for legacy configs that lack a ``tokenizer_class`` entry."""
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        """Forward audio to the feature extractor and/or text to the tokenizer.

        Inside ``as_target_processor`` everything goes to the current processor.
        Returns the feature-extractor output with tokenizer ids attached as
        ``labels`` when both modalities are given.
        """
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        """Pad audio features and/or tokenized labels; mirrors ``__call__``'s routing."""
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily route plain calls to the tokenizer (deprecated label-processing path)."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 402 |
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def snake_case(checkpoint_path, config_path, output_path):
    """Convert an original CompVis latent-diffusion checkpoint to a diffusers LDMPipeline.

    Restored: the original had three identically-named parameters (SyntaxError) and
    assigned filtered state-dict tensors to a throwaway local instead of the
    per-component dicts.
    NOTE(review): ``import OmegaConf`` at the top of this file should likely be
    ``from omegaconf import OmegaConf`` — confirm.
    """
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_config = config.model.params.first_stage_config.params
    unet_config = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_config).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_config).eval()
    unet.load_state_dict(unet_state_dict)

    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        # NOTE(review): the mangled original passed the output path here; False
        # matches the upstream conversion script — confirm.
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    # Restored: `parser`/`args` were assigned to dead locals and the call targeted
    # an undefined name; the converter in this file is named `snake_case`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()
    snake_case(args.checkpoint_path, args.config_path, args.output_path)
| 165 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowercase(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast checks for OnnxStableDiffusionUpscalePipeline across schedulers.

    Restored: the original base ``__lowerCamelCase`` was undefined
    (OnnxPipelineTesterMixin is imported above), the checkpoint attribute was
    mangled though read as ``self.hub_checkpoint``, and every method was named
    ``__lowercase`` (clobbering each other, and invisible to unittest discovery).
    """

    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        """Deterministic 128x128 input image + generation kwargs."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration checks for the ONNX x4 upscaler.

    Renamed: the original reused the fast-test class's name ``__lowercase``,
    shadowing it at module scope; the ``gpu_provider``/``gpu_options`` properties
    (read by the tests as ``self.gpu_provider`` etc.) also had their names mangled.
    """

    @property
    def gpu_provider(self):
        """onnxruntime CUDA execution-provider tuple with a fixed memory arena."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 718 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    """A small dense matrix supporting +, -, *, transpose and the Sherman-Morrison update."""

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a ``row`` x ``column`` matrix filled with ``default_value``."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        """Render the matrix with right-aligned, equally-wide cells."""
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier: width of the widest rendered element.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple) -> bool:
        """Return True when ``loc`` is a valid (row, column) index pair for this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple) -> float:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple, value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: "Matrix") -> "Matrix":
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add element-wise
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> "Matrix":
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: "Matrix") -> "Matrix":
        return self + (-another)

    def __mul__(self, another) -> "Matrix":
        """Scalar multiplication for int/float, matrix product for Matrix operands."""
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> "Matrix":
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: "Matrix", v: "Matrix"):
        """Return (A + u v^T)^-1 via the Sherman-Morrison formula, or None when singular.

        ``self`` must already hold A^-1; ``u`` and ``v`` must be column vectors of
        matching dimension.
        """
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Demonstrate the Sherman-Morrison update on the 3x3 identity."""
        # a^(-1): the identity is its own inverse
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        """Run the module doctests."""
        import doctest

        doctest.testmod()

    test1()
| 194 | 0 |
'''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan (L1) distance between two points of equal dimension.

    Raises:
        ValueError: when the two points live in spaces of different dimension.
        TypeError/ValueError: from ``_validate_point`` when an input is malformed.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
def _A ( A ) -> None:
if point:
if isinstance(A ,A ):
for item in point:
if not isinstance(A ,(int, float) ):
lowercase : Any = (
"Expected a list of numbers as input, found "
F'''{type(A ).__name__}'''
)
raise TypeError(A )
else:
lowercase : Dict = F'''Expected a list of numbers as input, found {type(A ).__name__}'''
raise TypeError(A )
else:
raise ValueError("Missing an input" )
def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Manhattan (L1) distance — compact variant with the same contract as
    ``manhattan_distance``."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 372 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowerCAmelCase : List[str] = re.compile("""[^A-Za-z_0-9]""")
# parameters used in DuplicationIndex
lowerCAmelCase : Union[str, Any] = 1_0
lowerCAmelCase : Optional[Any] = 2_5_6
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a token list, or None when there are too few tokens."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    """Split ``code`` on non-alphanumeric characters, returning the non-blank tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    """MinHash-LSH index that groups near-duplicate files into clusters."""

    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        # Maps a cluster's base key to the set of its near-duplicate keys.
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert ``(code_key, min_hash)``; attach the key to an existing cluster
        when the LSH index reports close duplicates."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # No existing cluster: start one on the first close duplicate.
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """Return clusters as lists of {"base_index", "repo_name", "path"} dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        """Dump the duplicate clusters to ``filepath`` as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    """Map an (index, row) pair to ((index, repo_name, path), MinHash), or None
    when the file has too few tokens."""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    """Yield (identifier, MinHash) pairs computed in parallel over the dataset rows."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=1_0_0_0_0), chunksize=1_0_0
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold: float):
    """Index every file and return the list of near-duplicate clusters."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=1_0_0)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Token-set Jaccard similarity between two code strings."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
lowerCAmelCase : List[str] = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce one duplicate cluster to its 'extremes' (mutually dissimilar
    representatives), counting on each extreme how many members it covers."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            # Not similar to any extreme found so far: this element is a new extreme.
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Run ``_find_cluster_extremes_shared`` over all clusters with a process pool.

    The dataset is published through the module-level ``_shared_dataset`` global so
    forked workers can read it without pickling it per task.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold=0.85):
    """Deduplicate ``dataset`` by near-duplicate clustering.

    Returns the filtered dataset (duplicates removed, extremes kept) and the
    duplicate clusters annotated with ``is_extreme`` / ``copies``.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    # Drop every duplicate that is not a cluster extreme.
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
| 372 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
# Type alias for a path: list of (y, x) coordinates.
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """A search-tree node: a grid position, the goal position, and a parent link."""

    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Canonical (y, x) coordinate used for equality checks during the search.
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """Plain FIFO breadth-first search over the module-level ``grid``."""

    def __init__(self, start, goal):
        # Node takes (pos_x, pos_y, ...) so the (y, x) tuples are swapped here.
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self):
        """Run BFS; return the path to the target, or [start] when unreachable."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        """Return in-bounds, non-obstacle neighbour nodes of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node):
        """Walk the parent links back to the start and return the path in order."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """Two simultaneous BFS frontiers (start->goal and goal->start) that stop when
    they meet."""

    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self):
        """Advance both frontiers one node at a time until they touch."""
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )
            # Each direction chases the other's current node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        """Join the forward path with the reversed backward path (meeting point once)."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 605 | '''simple docstring'''
import operator as op
def snake_case__ ( _A: Optional[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase = []
lowerCAmelCase = lambda _A , _A : int(x / y ) # noqa: E731 integer division operation
lowerCAmelCase = {
"""^""": op.pow,
"""*""": op.mul,
"""/""": div,
"""+""": op.add,
"""-""": op.sub,
} # operators & their respective operation
# print table header
print("""Symbol""".center(8 ) , """Action""".center(12 ) , """Stack""" , sep=""" | """ )
print("""-""" * (30 + len(_A )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(_A ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ("""push(""" + x + """)""").ljust(12 ) , """,""".join(_A ) , sep=""" | """ )
else:
lowerCAmelCase = stack.pop() # pop stack
# output in tabular format
print("""""".rjust(8 ) , ("""pop(""" + b + """)""").ljust(12 ) , """,""".join(_A ) , sep=""" | """ )
lowerCAmelCase = stack.pop() # pop stack
# output in tabular format
print("""""".rjust(8 ) , ("""pop(""" + a + """)""").ljust(12 ) , """,""".join(_A ) , sep=""" | """ )
stack.append(
str(opr[x](int(_A ) , int(_A ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ("""push(""" + a + x + b + """)""").ljust(12 ) , """,""".join(_A ) , sep=""" | """ , )
return int(stack[0] )
if __name__ == "__main__":
    # Read a space-separated postfix expression and print its value.
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 605 | 1 |
def solution(max_perimeter: int = 10**9) -> int:
    """Project Euler 94: sum of the perimeters (<= ``max_perimeter``) of all
    almost-equilateral triangles with integral side lengths and integral area.

    Successive solutions are generated by a linear recurrence, alternating between
    the (n, n, n+1) and (n, n, n-1) families.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        # Alternate between the (n, n, n+1) and (n, n, n-1) triangle families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
if __name__ == "__main__":
    # Print the Project Euler 94 answer for the default limit.
    print(f"{solution() = }")
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    """Configuration class for GPT-NeoX models.

    Defaults match EleutherAI/gpt-neox-20b. ``rope_scaling`` is an optional dict
    {"type": "linear"|"dynamic", "factor": float > 1} validated at construction.
    """

    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50_432,
        hidden_size=6_144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24_576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10_000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisble by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the optional ``rope_scaling`` dictionary."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 525 | 0 |
def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci term with ``n`` digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        # Count the digits of the new Fibonacci number.
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
    # Read the target digit count from stdin and print the Fibonacci index.
    print(solution(int(str(input()).strip())))
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy weights from an old-structure (XLM-)ProphetNet checkpoint into the
    current model classes and save the result.

    The checkpoint is loaded with both the legacy and the current implementation;
    every key the current model reports as missing is then resolved against the
    legacy module tree (renaming attributes via ``mapping``) and copied over.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # Attention projections that the old model stores fused as in_proj_*.
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new attribute name -> old attribute name ("" means "stay on the same module").
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # NOTE: these two checks were bare comparisons (no-ops); made real asserts.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # Descend one level in both module trees.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal string into an int whose decimal digits spell the
    binary representation (negative input gives a negative result).

    >>> hex_to_bin("AC")
    10101100

    Raises:
        ValueError: for an empty string or a non-hexadecimal value.
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    # Edge case: 0 would otherwise produce an empty string and int("") crashes.
    if int_num == 0:
        return 0
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
    # Run the conversion doctests when executed as a script.
    import doctest
    doctest.testmod()
| 685 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    """Wrapper that exposes a timm feature extractor through the transformers
    backbone interface."""

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Build a TimmBackboneConfig from the kwargs and defer to ``_from_config``."""
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Initialization is handled entirely by timm; nothing to do here.
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        """Run the timm backbone; returns feature maps (and hidden states on request)."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
| 685 | 1 |
def solution(n: int = 1000) -> int:
    """Project Euler 120: sum over 3 <= a <= n of the maximum remainder of
    (a-1)^k + (a+1)^k modulo a^2, which equals 2*a*floor((a-1)/2)."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
    # Print the Project Euler answer when run as a script.
    print(solution())
| 717 |
# Length unit name -> SI symbol
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert ``value`` between metric length units given by name or symbol.

    Raises:
        ValueError: when either unit is not a known metric length unit.
    """
    # Normalise: lowercase, drop plural 's', then map full names to symbols.
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
    # Run the conversion doctests when executed as a script.
    from doctest import testmod
    testmod()
| 142 | 0 |
'''simple docstring'''
import torch
from transformers import AutoModel
class A(torch.nn.Module):
    """Few-shot NER (FSNER) span-scoring model.

    Wraps a pretrained BERT encoder and, given a batch of queries and a batch of
    support examples, produces per-token start/end probabilities for the query
    entities by attending query token embeddings against the support tokens that
    carry the special start/end marker ids.
    """

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        # dim=3 cosine with small eps; applied between broadcasted embedding tensors.
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        """Encode tokenized inputs and return the last hidden states."""
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        """Sum token embeddings along dim 2, keeping the reduced dimension."""
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        """Temperature-scaled softmax over cosine similarities of two representations."""
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score start/end token positions of the query spans against supports.

        Args:
            W_query: Tokenizer output (dict of tensors) for the query batch.
            W_supports: Tokenizer output for the support batch, augmented with
                "sizes" (support count per query), "start_token_id" and
                "end_token_id" entries, which are consumed (deleted) here.

        Returns:
            Tuple ``(p_starts, p_ends)`` of stacked per-query probability rows.
        """
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        # These bookkeeping entries must not be forwarded to the encoder.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            # NOTE(review): offset uses the previous entry of `support_sizes`,
            # mirroring the original FSNER implementation — confirm it is not
            # meant to be a cumulative offset.
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
'''simple docstring'''
def __UpperCAmelCase(number_of_steps: int) -> int:
    """Count the distinct ways to climb a staircase taking 1 or 2 steps at a time.

    This is the Fibonacci recurrence: ways(n) = ways(n-1) + ways(n-2),
    computed iteratively in O(n) time and O(1) space.

    Args:
        number_of_steps: Total number of stairs; must be a positive integer.

    Returns:
        The number of distinct climbing sequences.
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    # current = ways(2), previous = ways(1)
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Project Euler 71: numerator of the largest fraction < numerator/denominator
    with a denominator not exceeding ``limit``.

    Args:
        numerator: Numerator of the bound fraction.
        denominator: Denominator of the bound fraction.
        limit: Maximum allowed denominator (inclusive).

    Returns:
        The numerator of the best (largest) fraction strictly below the bound.
    """
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        # Floor division hits the bound exactly when denominator divides
        # current_denominator; step one below to stay strictly smaller.
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # Cross-multiplied comparison avoids floating point:
        # current_numerator/current_denominator > max_numerator/max_denominator
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1000000))
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A directed edge to ``destination_vertex`` with 0/1 ``weight``."""

    destination_vertex: int
    weight: int


class lowercase:
    """Adjacency-list graph supporting 0-1 BFS shortest paths.

    Edge weights are restricted to 0 or 1, which lets a deque-based BFS
    (0-1 BFS) find shortest paths in O(V + E).
    """

    def __init__(self, size):
        # One adjacency list per vertex, indexed 0..size-1.
        self._graph = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex) -> Iterator[Edge]:
        """Iterate over the edges leaving ``vertex``."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        """Number of vertices in the graph."""
        return self._size

    def add_edge(self, from_vertex, to_vertex, weight):
        """Add a directed edge; weight must be 0 or 1 and the target in range."""
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def bfs_shortest_path(self, start_vertex, finish_vertex) -> int:
        """Return the 0-1 BFS shortest distance from start to finish.

        Raises:
            ValueError: If no path exists between the two vertices.
        """
        queue = deque([start_vertex])
        distances = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                # Skip if an equal-or-better distance is already recorded.
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front (same layer), 1-weight to the back.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger; used below when falling back to the default backbone config.
logger = logging.get_logger(__name__)

# Public checkpoint id -> hosted config URL.
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class A_(PretrainedConfig):
    """Configuration for a Conditional DETR object-detection model.

    Stores encoder/decoder transformer sizes, the backbone choice (timm or a
    nested HF backbone config), and the Hungarian-matcher / loss coefficients.

    NOTE(review): this file also defines a second class named ``A_`` below
    (the ONNX config); the scrambled names collide at module level — confirm
    the intended distinct names against upstream.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Generic attribute names mapped onto the DETR-specific ones.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # A plain dict carries its own model_type; rebuild the typed config.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher costs
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Encoder attention heads, exposed under the generic name."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Model dimension, exposed under the generic name."""
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class A_(OnnxConfig):
    """ONNX export configuration for the Conditional DETR model above."""

    # Minimum torch version required for a correct ONNX export.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        """Dynamic-axis specification for the exported model inputs."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        """Default ONNX opset version for the export."""
        return 12
| 84 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
_a = 100
_a = set(range(3, NUM_PRIMES, 2))
primes.add(2)
_a = 42
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def lowerCamelCase__ ( __snake_case ) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
_UpperCamelCase = set()
_UpperCamelCase = 42
_UpperCamelCase = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def lowerCamelCase__ ( __snake_case = 50_00 ) -> int | None:
"""simple docstring"""
for number_to_partition in range(1, __snake_case ):
if len(partition(__snake_case ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 19 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
# Shared "Args" documentation injected into the RAG config class via
# `add_start_docstrings`. The raw literal had been split across two physical
# lines by a stray garbled character in the damaged source; it is rejoined
# here via implicit string concatenation with that character removed.
lowerCAmelCase_ : Optional[Any] = (
    R'\n    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n    Args:\n        title_sep (`str`, *optional*, defaults to  `" / "`):\n            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n        doc_sep (`str`, *optional*, defaults to  `" // "`):\n            Separator inserted between the text of the retrieved document and the original input when calling\n            [`RagRetriever`].\n        n_docs (`int`, *optional*, defaults to 5):\n            Number of documents to retrieve.\n        max_combined_length (`int`, *optional*, defaults to 300):\n            Max length of contextualized input returned by [`~RagRetriever.__call__`].\n        retrieval_vector_size (`int`, *optional*, defaults to 768):\n            Dimensionality of the document embeddings indexed by [`RagRetriever`].\n        retrieval_batch_size (`int`, *optional*, defaults to 8):\n            Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n            [`RagRetriever`].\n        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n            using `datasets.list_datasets()`).\n        dataset_split (`str`, *optional*, defaults to `"train"`)\n            Which split of the `dataset` to load.\n        index_name (`str`, *optional*, defaults to `"compressed"`)\n            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n            `"compressed"`.\n        index_path (`str`, *optional*)\n            The path to the serialized faiss index on disk.\n        passages_path (`str`, *optional*):\n            A path to text passages compatible with the faiss index. '
    R'Required if using\n            [`~models.rag.retrieval_rag.LegacyIndex`]\n        use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n            Whether to load a "dummy" variant of the dataset specified by `dataset`.\n        label_smoothing (`float`, *optional*, defaults to 0.0):\n            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n            in the loss calculation. If set to 0, no label smoothing is performed.\n        do_marginalize (`bool`, *optional*, defaults to `False`):\n            If `True`, the logits are marginalized over all documents by making use of\n            `torch.nn.functional.log_softmax`.\n        reduce_loss (`bool`, *optional*, defaults to `False`):\n            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n        do_deduplication (`bool`, *optional*, defaults to `True`):\n            Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n            set to `False` if used while training with distributed backend.\n        exclude_bos_score (`bool`, *optional*, defaults to `False`):\n            Whether or not to disregard the BOS token when computing the loss.\n        output_retrieved(`bool`, *optional*, defaults to `False`):\n            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n            `context_attention_mask` are returned. See returned tensors for more detail.\n        use_cache (`bool`, *optional*, defaults to `True`):\n            Whether or not the model should return the last key/values attentions (not used by all models).\n        forced_eos_token_id (`int`, *optional*):\n            The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n            `eos_token_id`.\n'
)
@add_start_docstrings(lowerCAmelCase_)
class __SCREAMING_SNAKE_CASE(PretrainedConfig):
    """RAG (Retrieval-Augmented Generation) configuration.

    Composes a question-encoder config and a generator config (both resolved
    via ``AutoConfig``) together with retrieval and loss hyperparameters.
    """

    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        # Pop the nested config dicts and rebuild typed configs from their model_type.
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported locally to avoid a circular import with the auto-config module.
        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        # Fall back to the generator's forced EOS token if none was given.
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        """Build a RAG config from already-instantiated sub-configs."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding both nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __SCREAMING_SNAKE_CASE(ProcessorMixin):
    """Processor pairing a CLIP image processor with an XLM-RoBERTa tokenizer.

    A single callable front-end that tokenizes text and/or preprocesses images
    and merges both into one encoding for the model.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        # Backward compatibility: accept the deprecated `feature_extractor` kwarg.
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Prepare text and/or images for the model.

        Raises:
            ValueError: If neither text nor images is provided.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Merge the image features into the text encoding.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union (order-preserving, de-duplicated) of both components' input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# PIL is an optional dependency; only import it when vision support is available.
if is_vision_available():
    import PIL


# Module-level logger (bound to the scrambled name `_A`; unused in this chunk).
_A: List[Any] = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce the accepted video input shapes into a batch of frame lists.

    Accepts a batch of videos (list of lists of images), a single video
    (list of images), or a single image, and always returns the doubly
    nested ``[[image, ...], ...]`` form.

    Raises:
        ValueError: If the input matches none of the supported shapes.
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class UpperCAmelCase(BaseImageProcessor):
    """Video image processor: per-frame resize, center-crop, rescale, normalize.

    Videos are batched via ``make_batched`` and every frame is processed
    independently, then packed into a ``BatchFeature`` under "pixel_values".
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Defaults: shortest edge 224 for resize, 224x224 center crop.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize one frame to ``size`` (shortest-edge or explicit height/width)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop one frame to an explicit height/width."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize one frame with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transform pipeline to a single frame."""
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one video or a batch of videos into model pixel values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test suite for LayoutLM (slow and fast tokenizers)."""

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        """Write a tiny WordPiece vocab into the temp dir used by the mixin."""
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        """Return a slow tokenizer loaded from the temp vocab."""
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Provide a raw/normalized text pair for the common round-trip tests."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and token→id conversion on the tiny vocab."""
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
def __UpperCamelCase(column_title: str) -> int:
    """Convert an Excel column title (e.g. "A", "AB", "ZY") to its number.

    Treats the title as a base-26 numeral where 'A' == 1 and 'Z' == 26.

    Args:
        column_title: Non-empty string of uppercase ASCII letters.

    Returns:
        The 1-based column number.
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    # Accumulate digits from least significant (rightmost) to most significant.
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module logger for this configuration file.
logger = logging.get_logger(__name__)

# Public checkpoint id -> hosted config URL.
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCamelCase__(BackboneConfigMixin, PretrainedConfig):
    """FocalNet model configuration (backbone-capable).

    Holds the patch-embedding, stage, and focal-modulation hyperparameters,
    plus the backbone stage/feature selection handled by BackboneConfigMixin.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # "stem" plus one named stage per depth entry.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    """Map one key of the original YOSO checkpoint to the transformers naming scheme.

    Rewrites are applied in order, so later substring checks see the already
    rewritten key; keys outside the classification head get the "yoso." prefix.
    """
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename all keys in-place-ish and add the derived prediction-bias /
    position-ids entries; pooler and sentence-classifier weights are dropped."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    # Position ids start at 2 (padding-idx offset), broadcast to a batch of 1.
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load an original YOSO checkpoint, convert it, and save a HF model."""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
# Configure root logging once for the whole process; `logger` is used by the
# helpers below (e.g. init_gpu_params logs through it).
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
def A_ ( snake_case_ : str ):
    """Dump git metadata (repo id, commit sha, branch) to ``<snake_case_>/git_log.json``
    so a training run can be traced back to an exact commit.

    Fixes: the original passed the folder path as ``search_parent_directories``
    and stringified the folder instead of the repo for ``repo_id``.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(snake_case_, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def A_ ( snake_case_ : Union[str, Any] ):
    """Handle single- and multi-GPU / multi-node initialization.

    Fills the distributed-training attributes on the params namespace
    (``n_nodes``, ``node_id``, ``local_rank``, ``global_rank``, ``world_size``,
    ``n_gpu_per_node``, ``multi_gpu``, ``multi_node``, ``is_master``), pins the
    CUDA device, and initializes ``torch.distributed`` for multi-GPU runs.
    The original assigned every value to throwaway names instead of ``params``.
    """
    params = snake_case_
    if params.n_gpu <= 0:
        # CPU-only run: nothing to initialize
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        # topology is provided by the launcher through environment variables
        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f'--- Global rank: {params.global_rank} - '
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID : %i" % params.node_id)
    logger.info(PREFIX + "Local rank : %i" % params.local_rank)
    logger.info(PREFIX + "World size : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def A_ ( snake_case_ : int ):
    """Seed numpy and torch RNGs from ``snake_case_.seed`` for reproducibility
    (also seeds all CUDA devices when GPUs are in use).

    Fixes: the original body read an undefined global ``args`` instead of its
    own parameter.
    """
    np.random.seed(snake_case_.seed)
    torch.manual_seed(snake_case_.seed)
    if snake_case_.n_gpu > 0:
        torch.cuda.manual_seed_all(snake_case_.seed)
| 499 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class a ( PipelineTool ):
    """Tool that transcribes audio to text with ``openai/whisper-base``.

    NOTE(review): in the original hunk the base class, every class attribute,
    and all three methods shared a single obfuscated name, so later definitions
    clobbered earlier ones; restored to the attribute/method names the
    ``PipelineTool`` contract reads (``encode`` / ``forward`` / ``decode``).
    """

    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        """Turn raw audio into Whisper input features."""
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        """Run generation on the encoded features."""
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        """Decode generated token ids into the transcription string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
def UpperCamelCase__ ( _lowercase : int ) -> int:
    """Return the ``_lowercase``-th Catalan number (1-indexed: 1 -> 1, 3 -> 2, 5 -> 14).

    Uses the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1) with integer arithmetic.

    Raises:
        TypeError: if ``_lowercase`` is not an integer.
        ValueError: if ``_lowercase`` < 1.
    """
    # bug fix: the original called isinstance(x, x), which raises for every input,
    # and its error messages referenced an undefined name `number`.
    if not isinstance(_lowercase, int):
        msg = f"Input value of [number={_lowercase}] must be an integer"
        raise TypeError(msg)
    if _lowercase < 1:
        msg = f"Input value of [number={_lowercase}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, _lowercase):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod() | 466 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 480 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
# `logger` and `rename_keys` were assigned to throwaway names in the original,
# leaving every reference below undefined.
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place.

    Fixes: the original declared three parameters with one shared name
    (a SyntaxError) and was called as ``rename_key`` below.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a new OrderedDict with backbone keys mapped to the HF naming scheme."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split fused ``in_proj`` weights/biases into separate q/k/v projections (in place).

    The hidden size of table-transformer is 256, hence the hard-coded slices.
    The original's destination keys were stripped by obfuscation; restored to
    the standard DETR q/k/v naming.
    """
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize so the longest side is 800 (detection) or 1000 (structure), keeping aspect ratio."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """To tensor + ImageNet mean/std normalization (matches DETR preprocessing).

    Uses torchvision's functional API (imported as ``F`` at the top of the file).
    """
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Table Transformer checkpoint into the HF DETR-style layout.

    Downloads the checkpoint, renames/splits its weights, verifies the converted
    model's outputs on a known example image, then optionally saves and/or pushes
    the model and image processor.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on a known example image
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    # CLI entry point: pick one of the two official PubTables-1M checkpoints.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 480 | 1 |
def and_gate(input_1: int, input_2: int) -> int:
    """Logical AND for binary inputs: 1 only when both inputs are non-zero.

    Fixes: the original declared two parameters with the same name (SyntaxError)
    and counted the first input twice; it is called as ``and_gate`` below.
    """
    return int((input_1, input_2).count(0) == 0)
def test_and_gate() -> None:
    """Exhaustively check the AND truth table (called from __main__ below)."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
    # run the self-test first, then print the full truth table
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 453 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
| 453 | 1 |
from statistics import mean, stdev
def lowerCamelCase_(data, ndigits=3):
    """Min-max normalize data to [0, 1], rounding each value to ``ndigits`` places.

    Fixes: the original declared both parameters with the same name (SyntaxError)
    and its body referenced undefined locals.
    """
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
def lowerCamelCase_(data, ndigits=3):
    """Standardize data to zero mean / unit (sample) stdev, rounding to ``ndigits`` places.

    Fixes: the original declared both parameters with the same name (SyntaxError)
    and its body referenced undefined locals.
    """
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one dataset example and record its chars-per-token ratio.

    NOTE(review): relies on the module-level ``tokenizer`` created in the
    __main__ section below; ``ds.map`` there calls this by the name ``tokenize``.
    """
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
# Script body: parse args, tokenize the dataset in parallel, push to the hub.
# The original assigned parser/args/tokenizer/t_start/ds to throwaway names,
# leaving every later reference undefined.
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
# root logger; the original assigned it to a throwaway name while the code
# below reads `logger`
logger = logging.getLogger()
def __lowercase (_lowercase, _lowercase ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase : Dict = """\n""".join(_lowercase )
Path(_lowercase ).open("""w""" ).writelines(_lowercase )
# tiny checkpoints used by the tests below; the original assigned these (and the
# stream handler) to throwaway names while the test class reads T5_TINY etc.
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class SCREAMING_SNAKE_CASE ( TestCasePlus ):
    """End-to-end tests for run_eval.py / run_eval_search.py on tiny models.

    Fixes: all three test methods shared one obfuscated name (clobbering each
    other), the helper was called as ``run_eval_tester`` but not defined under
    that name, and many locals were stripped by obfuscation.
    """

    def run_eval_tester(self, model):
        """Run run_eval on one tiny model and check an output file is produced."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()
        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    # any extra models should go into the list here - can be slow
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])
        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 706 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# the class below reads these module-level constants by name; the original
# assigned each of them to a throwaway obfuscated name
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load the vocab and emoji files; returns (vocab, raw_vocab, ids_to_tokens, emoji).

    Each vocab line is either a single token or a comma-separated group of
    tokens sharing one id; every token in a group maps to that id in ``vocab``,
    while ``raw_vocab`` keeps only the first token of the group.
    """
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[b[0]] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Japanese GPT-NeoX tokenizer built on a sub-word vocabulary plus an emoji table.

    NOTE(review): an obfuscation pass rewrote every binding target to the
    placeholder ``__lowerCamelCase`` and every parameter to ``A__`` (``__init__``
    even declares duplicate parameter names, which is a SyntaxError), so names
    read later (``self.do_clean_text``, ``self.vocab``, ``self.raw_vocab``,
    ``self.ids_to_tokens``, ``self.emoji``, ``self.subword_tokenizer``,
    ``vocab_file``, ``emoji_file``, ``index``, ``out_string`` …) are never
    actually bound. Code is kept byte-identical here; restoring the intended
    names is a separate fix.
    """
    snake_case__ : Optional[Any] = VOCAB_FILES_NAMES
    snake_case__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
    snake_case__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Tensor names produced by __call__.
    snake_case__ : Union[str, Any] = ['input_ids', 'attention_mask']

    def __init__( self : Tuple , A__ : int , A__ : List[str] , A__ : Any="<|endoftext|>" , A__ : Tuple="<|endoftext|>" , A__ : Dict="<|startoftext|>" , A__ : Dict="<|endoftext|>" , A__ : List[str]=False , **A__ : List[Any] , ):
        """Load the vocabulary / emoji files and build the sub-word tokenizer."""
        super().__init__(
            unk_token=A__ , pad_token=A__ , bos_token=A__ , eos_token=A__ , do_clean_text=A__ , **A__ , )
        # Fail early if either of the two data files is missing.
        if not os.path.isfile(A__ ):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                """ model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
        if not os.path.isfile(A__ ):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                """ pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
        __lowerCamelCase : Tuple = do_clean_text
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = load_vocab_and_emoji(A__ , A__ )
        __lowerCamelCase : Union[str, Any] = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )

    @property
    def a_ ( self : List[Any] ):
        """Vocabulary size (number of entries in the raw vocab)."""
        return len(self.raw_vocab )

    def a_ ( self : Union[str, Any] ):
        """Return the vocabulary merged with any added tokens."""
        return dict(self.raw_vocab , **self.added_tokens_encoder )

    def a_ ( self : List[Any] , A__ : Optional[int] ):
        """Tokenize text with the sub-word tokenizer, optionally cleaning it first."""
        return self.subword_tokenizer.tokenize(A__ , clean=self.do_clean_text )

    def a_ ( self : Optional[Any] , A__ : Any ):
        """Convert a token string to its id, falling back to the unk token's id."""
        return self.vocab.get(A__ , self.vocab.get(self.unk_token ) )

    def a_ ( self : Any , A__ : Any ):
        """Convert an id back to its token string."""
        return self.subword_tokenizer.convert_id_to_token(A__ )

    def a_ ( self : Optional[int] , A__ : Optional[Any] ):
        """Join a sequence of tokens into a single stripped string."""
        __lowerCamelCase : List[Any] = """""".join(A__ ).strip()
        return out_string

    def a_ ( self : str , A__ : "Conversation" ):
        """Encode a Conversation, appending eos after each turn and truncating to model_max_length."""
        __lowerCamelCase : List[str] = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(A__ , add_special_tokens=A__ ) + [self.eos_token_id] )
        if len(A__ ) > self.model_max_length:
            # Keep only the most recent tokens.
            __lowerCamelCase : str = input_ids[-self.model_max_length :]
        return input_ids

    def a_ ( self : str , A__ : str , A__ : Optional[str] = None ):
        """Write the vocabulary and emoji table to disk and return their paths."""
        __lowerCamelCase : List[str] = 0
        if os.path.isdir(A__ ):
            __lowerCamelCase : Union[str, Any] = os.path.join(
                A__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
            __lowerCamelCase : int = os.path.join(
                A__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] )
        else:
            # Target is a file path prefix rather than a directory.
            __lowerCamelCase : str = (
                (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""]
            )
            __lowerCamelCase : int = (
                (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""]
            )
        with open(A__ , """w""" , encoding="""utf-8""" ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    # Vocabulary indices should be consecutive; warn on gaps.
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        """ Please check that the vocabulary is not corrupted!""" )
                    __lowerCamelCase : Union[str, Any] = token_index
                writer.write(""",""".join(A__ ) + """\n""" )
                index += 1
        with open(A__ , """w""" , encoding="""utf-8""" ) as writer:
            json.dump(self.emoji , A__ )
        return vocab_file, emoji_file
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Sub-word tokenizer for Japanese text with URL/e-mail/phone/date/price
    normalisation and a byte-level fallback for out-of-vocabulary characters.

    NOTE(review): an obfuscation pass rewrote every binding target to
    ``__lowerCamelCase`` (and ``__init__`` declares duplicate ``A__`` parameters
    — a SyntaxError), so the attributes read later (``self.vocab``,
    ``self.ids_to_tokens``, ``self.emoji``, ``self.maxlen``,
    ``self.content_repattera``, ``self.content_transa`` …) are never actually
    bound. Code kept byte-identical; restoring the names is a separate fix.
    """
    def __init__( self : Union[str, Any] , A__ : Any , A__ : str , A__ : Optional[Any] ):
        """Store vocab / id-to-token map / emoji table and pre-compile the cleaning regexes."""
        __lowerCamelCase : List[Any] = vocab # same as swe
        __lowerCamelCase : Union[str, Any] = ids_to_tokens # same as bpe
        __lowerCamelCase : List[str] = emoji
        # Length of the longest vocabulary key — upper bound for greedy matching.
        __lowerCamelCase : int = np.max([len(A__ ) for w in self.vocab.keys()] )
        # Patterns used by the cleaner to replace URLs, e-mails, phone numbers,
        # dates (two formats) and prices with placeholder tokens.
        # NOTE(review): upstream these are six distinct attributes; here they all
        # collapse onto one obfuscated name, so only the last survives.
        __lowerCamelCase : List[Any] = re.compile(r"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
        __lowerCamelCase : Optional[Any] = re.compile(r"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
        __lowerCamelCase : str = re.compile(r"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
        __lowerCamelCase : Optional[int] = re.compile(
            r"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
        __lowerCamelCase : Union[str, Any] = re.compile(
            r"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
        __lowerCamelCase : Tuple = re.compile(
            r"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
        # Box-drawing and block-element glyphs are all normalised to <BLOCK>.
        __lowerCamelCase : List[str] = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"""
        __lowerCamelCase : List[str] = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"""
        __lowerCamelCase : Optional[int] = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )

    def __len__( self : Tuple ):
        """Number of known token ids."""
        return len(self.ids_to_tokens )

    def a_ ( self : Dict , A__ : Tuple ):
        """Replace URLs/e-mails/phone numbers/dates/prices with placeholder tokens
        and collapse runs of box-drawing characters into a single <BLOCK>."""
        __lowerCamelCase : List[Any] = self.content_repattera.sub("""<URL>""" , A__ )
        __lowerCamelCase : List[Any] = self.content_repattera.sub("""<EMAIL>""" , A__ )
        __lowerCamelCase : str = self.content_repattera.sub("""<TEL>""" , A__ )
        __lowerCamelCase : Optional[int] = self.content_repattera.sub("""<DATE>""" , A__ )
        __lowerCamelCase : Any = self.content_repattera.sub("""<DATE>""" , A__ )
        __lowerCamelCase : List[str] = self.content_repattera.sub("""<PRICE>""" , A__ )
        __lowerCamelCase : Any = content.translate(self.content_transa )
        while "<BLOCK><BLOCK>" in content:
            __lowerCamelCase : Dict = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
        return content

    def a_ ( self : List[Any] , A__ : Dict , A__ : Optional[Any]=False ):
        """Greedy longest-match sub-word tokenisation with byte-level fallback."""
        # Normalise whitespace, line breaks and dash variants, then map emoji.
        __lowerCamelCase : Union[str, Any] = text.replace(""" """ , """<SP>""" )
        __lowerCamelCase : Optional[int] = text.replace("""　""" , """<SP>""" )
        __lowerCamelCase : Tuple = text.replace("""\r\n""" , """<BR>""" )
        __lowerCamelCase : Any = text.replace("""\n""" , """<BR>""" )
        __lowerCamelCase : Tuple = text.replace("""\r""" , """<BR>""" )
        __lowerCamelCase : List[str] = text.replace("""\t""" , """<TAB>""" )
        __lowerCamelCase : List[Any] = text.replace("""—""" , """ー""" )
        __lowerCamelCase : Any = text.replace("""−""" , """ー""" )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                __lowerCamelCase : Dict = text.replace(A__ , A__ )
        if clean:
            __lowerCamelCase : Optional[int] = self.clean_text(A__ )

        def check_simbol(A__ : Union[str, Any] ):
            # True when x is a single char whose UTF-8 encoding is a 2-byte
            # sequence in the Latin/IPA/diacritic ranges (rendered as <KIGOU>).
            __lowerCamelCase : str = x.encode()
            if len(A__ ) == 1 and len(A__ ) == 2:
                __lowerCamelCase : Optional[int] = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0XC2_A1 and c <= 0XC2_BF)
                    or (c >= 0XC7_80 and c <= 0XC7_83)
                    or (c >= 0XCA_B9 and c <= 0XCB_BF)
                    or (c >= 0XCC_80 and c <= 0XCD_A2)
                ):
                    return True
            return False

        def checkuae(A__ : Tuple ):
            # True when x is a single char in U+2000–U+2BFF (symbols/arrows),
            # rendered as <U2000U2BFF>.
            __lowerCamelCase : List[Any] = x.encode()
            if len(A__ ) == 1 and len(A__ ) == 3:
                __lowerCamelCase : List[Any] = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0XE2_80_80 and c <= 0XE2_B0_7F:
                    return True
            return False

        __lowerCamelCase : List[Any] = 0
        __lowerCamelCase : Optional[Any] = []
        while pos < len(A__ ):
            # Special "<...>" tokens may span up to maxlen chars; plain text is
            # matched with a window of at most 3 characters.
            __lowerCamelCase : Tuple = min(len(A__ ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3
            __lowerCamelCase : Optional[int] = [] # (token_id, token, pos)
            for e in range(A__ , A__ , -1 ):
                __lowerCamelCase : Dict = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(A__ ) > 2:
                        # A special token always wins outright.
                        __lowerCamelCase : Any = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(A__ ) > 0:
                # the smallest token_id is adopted
                __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = sorted(A__ , key=lambda A__ : x[0] )[0]
                result.append(A__ )
                __lowerCamelCase : Optional[Any] = e
            else:
                # No vocab match: classify the single char or fall back to bytes.
                __lowerCamelCase : Any = pos + 1
                __lowerCamelCase : Any = text[pos:end]
                if check_simbol(A__ ):
                    result.append("""<KIGOU>""" )
                elif checkuae(A__ ):
                    result.append("""<U2000U2BFF>""" )
                else:
                    for i in wd.encode("""utf-8""" ):
                        result.append("""<|byte%d|>""" % i )
                __lowerCamelCase : Optional[int] = end
        return result

    def a_ ( self : Optional[Any] , A__ : Optional[int] , A__ : Any="\n" ):
        """Map token id(s) back to text, decoding <|byteN|> runs as UTF-8 and
        expanding the placeholder tokens (<SP>, <BR>, <TAB>, emoji, …).

        NOTE(review): upstream this method iterates over a sequence of indices;
        the loop header appears to have been lost during obfuscation.
        """
        __lowerCamelCase : Tuple = []
        __lowerCamelCase : List[Any] = []
        __lowerCamelCase : Optional[int] = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(A__ ) > 0:
                # Flush any pending byte run before emitting a regular token.
                words.append(bytearray(A__ ).decode("""utf-8""" , errors="""replace""" ) )
                __lowerCamelCase : Tuple = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["""emoji_inv"""][word] )
            elif word == "<SP>":
                words.append(""" """ )
            elif word == "<BR>":
                words.append(A__ )
            elif word == "<TAB>":
                words.append("""\t""" )
            elif word == "<BLOCK>":
                words.append("""▀""" )
            elif word == "<KIGOU>":
                words.append("""ǀ""" )
            elif word == "<U2000U2BFF>":
                words.append("""‖""" )
            else:
                words.append(A__ )
        if len(A__ ) > 0:
            # Flush a trailing byte run.
            words.append(bytearray(A__ ).decode("""utf-8""" , errors="""replace""" ) )
        __lowerCamelCase : str = """""".join(A__ )
        return text
| 483 | 0 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)  # module-level logger
# Mapping from model id to its hosted config file.
# NOTE(review): both assignments target the same obfuscated name, so the map
# clobbers the logger; upstream these are `logger` and
# `AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`.
_snake_case = {
    """huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
class lowerCAmelCase ( lowercase_ ):
    """Configuration class for the Autoformer time-series forecasting model.

    NOTE(review): obfuscation damage — ``__init__`` declares every parameter as
    ``_lowercase`` (duplicate names, a SyntaxError) and every assignment targets
    the local ``lowercase__``, so no hyper-parameter is actually stored on the
    instance. Code kept byte-identical; restoring the names is a separate fix.
    """
    __lowerCamelCase = 'autoformer'
    # Standard HF aliases mapping generic attribute names onto this config.
    __lowerCamelCase = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__( self :str , _lowercase :Optional[int] = None , _lowercase :Optional[int] = None , _lowercase :str = "student_t" , _lowercase :str = "nll" , _lowercase :int = 1 , _lowercase :List[int] = [1, 2, 3, 4, 5, 6, 7] , _lowercase :bool = True , _lowercase :int = 0 , _lowercase :int = 0 , _lowercase :int = 0 , _lowercase :int = 0 , _lowercase :Optional[List[int]] = None , _lowercase :Optional[List[int]] = None , _lowercase :int = 64 , _lowercase :int = 2 , _lowercase :int = 2 , _lowercase :int = 2 , _lowercase :int = 2 , _lowercase :int = 32 , _lowercase :int = 32 , _lowercase :str = "gelu" , _lowercase :float = 0.1 , _lowercase :float = 0.1 , _lowercase :float = 0.1 , _lowercase :float = 0.1 , _lowercase :float = 0.1 , _lowercase :int = 1_00 , _lowercase :float = 0.02 , _lowercase :bool = True , _lowercase :Optional[Any]=True , _lowercase :int = 10 , _lowercase :int = 25 , _lowercase :int = 3 , **_lowercase :int , ):
        """Store time-series settings, feature cardinalities and transformer hyper-parameters."""
        # Time-series specific configuration.
        lowercase__ = prediction_length
        lowercase__ = context_length if context_length is not None else prediction_length
        lowercase__ = distribution_output
        lowercase__ = loss
        lowercase__ = input_size
        lowercase__ = num_time_features
        lowercase__ = lags_sequence
        lowercase__ = scaling
        lowercase__ = num_dynamic_real_features
        lowercase__ = num_static_real_features
        lowercase__ = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(_lowercase ) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            lowercase__ = cardinality
        else:
            lowercase__ = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(_lowercase ) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            lowercase__ = embedding_dimension
        else:
            # Default embedding size heuristic: min(50, (cardinality + 1) // 2).
            lowercase__ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        lowercase__ = num_parallel_samples
        # Transformer architecture configuration
        lowercase__ = input_size * len(self.lags_sequence ) + self._number_of_features
        lowercase__ = d_model
        lowercase__ = encoder_attention_heads
        lowercase__ = decoder_attention_heads
        lowercase__ = encoder_ffn_dim
        lowercase__ = decoder_ffn_dim
        lowercase__ = encoder_layers
        lowercase__ = decoder_layers
        lowercase__ = dropout
        lowercase__ = attention_dropout
        lowercase__ = activation_dropout
        lowercase__ = encoder_layerdrop
        lowercase__ = decoder_layerdrop
        lowercase__ = activation_function
        lowercase__ = init_std
        lowercase__ = use_cache
        # Autoformer
        lowercase__ = label_length
        lowercase__ = moving_average
        lowercase__ = autocorrelation_factor
        super().__init__(is_encoder_decoder=_lowercase , **_lowercase )

    @property
    def UpperCAmelCase ( self :Optional[Any] ):
        """Number of extra scalar features appended to each model input step."""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
        )
| 655 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_snake_case = logging.get_logger(__name__)
class lowerCAmelCase ( lowercase_ ):
    """Processor for the Bark text-to-speech model: wraps an AutoTokenizer plus
    optional preloaded speaker-embedding voice presets.

    NOTE(review): obfuscation damage — many assignments target the local
    ``lowercase__`` and so never bind the names (``embeddings_dict``,
    ``voice_preset_paths``, ``path``, ``voice_preset_dict``, ``encoded_text`` …)
    read later. Code kept byte-identical; restoring the names is a separate fix.
    """
    __lowerCamelCase = 'AutoTokenizer'
    __lowerCamelCase = ['tokenizer']
    # Expected ndarray rank for each component of a voice preset.
    __lowerCamelCase = {
        'semantic_prompt': 1,
        'coarse_prompt': 2,
        'fine_prompt': 2,
    }

    def __init__( self :Dict , _lowercase :List[str] , _lowercase :List[Any]=None ):
        """Store the tokenizer and the (optional) speaker-embeddings dictionary."""
        super().__init__(_lowercase )
        lowercase__ = speaker_embeddings

    @classmethod
    def UpperCAmelCase ( cls :Any , _lowercase :int , _lowercase :str="speaker_embeddings_path.json" , **_lowercase :List[str] ):
        """Instantiate from a pretrained repo, optionally loading the speaker-embeddings JSON."""
        if speaker_embeddings_dict_path is not None:
            lowercase__ = get_file_from_repo(
                _lowercase , _lowercase , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
            if speaker_embeddings_path is None:
                # Missing file is non-fatal: warn and continue without presets.
                logger.warning(
                    f'''`{os.path.join(_lowercase , _lowercase )}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
                lowercase__ = None
            else:
                with open(_lowercase ) as speaker_embeddings_json:
                    lowercase__ = json.load(_lowercase )
        else:
            lowercase__ = None
        lowercase__ = AutoTokenizer.from_pretrained(_lowercase , **_lowercase )
        return cls(tokenizer=_lowercase , speaker_embeddings=_lowercase )

    def UpperCAmelCase ( self :Any , _lowercase :Any , _lowercase :List[str]="speaker_embeddings_path.json" , _lowercase :Any="speaker_embeddings" , _lowercase :bool = False , **_lowercase :Any , ):
        """Save the tokenizer plus every voice preset (as .npy files + an index JSON)."""
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(_lowercase , _lowercase , "v2" ) , exist_ok=_lowercase )
            lowercase__ = {}
            lowercase__ = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    lowercase__ = self._load_voice_preset(_lowercase )
                    lowercase__ = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        # One .npy file per (preset, component) pair.
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"] , _lowercase , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=_lowercase , )
                        lowercase__ = os.path.join(_lowercase , f'''{prompt_key}_{key}.npy''' )
                    lowercase__ = tmp_dict
            with open(os.path.join(_lowercase , _lowercase ) , "w" ) as fp:
                json.dump(_lowercase , _lowercase )
        super().save_pretrained(_lowercase , _lowercase , **_lowercase )

    def UpperCAmelCase ( self :Optional[int] , _lowercase :str = None , **_lowercase :List[Any] ):
        """Load one named voice preset (semantic/coarse/fine prompts) from the repo."""
        lowercase__ = self.speaker_embeddings[voice_preset]
        lowercase__ = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
            lowercase__ = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
            if path is None:
                raise ValueError(
                    f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )
            lowercase__ = np.load(_lowercase )
        return voice_preset_dict

    def UpperCAmelCase ( self :Optional[int] , _lowercase :Optional[dict] = None ):
        """Validate that a voice-preset dict has all components with the expected rank."""
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )

    def __call__( self :Optional[Any] , _lowercase :Optional[Any]=None , _lowercase :List[str]=None , _lowercase :List[str]="pt" , _lowercase :List[Any]=2_56 , _lowercase :List[str]=False , _lowercase :Union[str, Any]=True , _lowercase :Dict=False , **_lowercase :Tuple , ):
        """Tokenize text and attach the (named or dict) voice preset as history."""
        if voice_preset is not None and not isinstance(_lowercase , _lowercase ):
            if (
                isinstance(_lowercase , _lowercase )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                lowercase__ = self._load_voice_preset(_lowercase )
            else:
                # Treat the string as a path to an .npz archive.
                if isinstance(_lowercase , _lowercase ) and not voice_preset.endswith(".npz" ):
                    lowercase__ = voice_preset + ".npz"
                lowercase__ = np.load(_lowercase )
        if voice_preset is not None:
            self._validate_voice_preset_dict(_lowercase , **_lowercase )
            lowercase__ = BatchFeature(data=_lowercase , tensor_type=_lowercase )
        lowercase__ = self.tokenizer(
            _lowercase , return_tensors=_lowercase , padding="max_length" , max_length=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
        if voice_preset is not None:
            lowercase__ = voice_preset
        return encoded_text
| 655 | 1 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
def A__ ( image_embeds, text_embeds ):
    """Pairwise cosine similarity between two batches of embeddings.

    Both inputs are L2-normalised along their last dimension, so the resulting
    (n_image, n_text) matrix of dot products is the cosine similarity.

    Fixes the obfuscated original, which declared both parameters with the same
    name (a SyntaxError) and referenced the unbound local
    `normalized_text_embeds`.
    """
    normalized_image_embeds = nn.functional.normalize(image_embeds )
    normalized_text_embeds = nn.functional.normalize(text_embeds )
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t() )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Stable-Diffusion safety checker: flags generated images whose CLIP
    embeddings are close to known NSFW / special-care concept embeddings.
    """
    UpperCAmelCase_ =CLIPConfig
    UpperCAmelCase_ =["CLIPEncoderLayer"]

    def __init__( self , _A ) -> List[Any]:
        super().__init__(_A )
        SCREAMING_SNAKE_CASE_ = CLIPVisionModel(config.vision_config )
        SCREAMING_SNAKE_CASE_ = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_A )
        # Fixed (non-trainable) concept embeddings and their score thresholds:
        # 17 NSFW concepts and 3 "special care" concepts.
        SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_A )
        SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_A )
        SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.ones(17 ) , requires_grad=_A )
        SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.ones(3 ) , requires_grad=_A )

    @torch.no_grad()
    def _UpperCamelCase ( self , _A , _A ) -> Tuple:
        """Per-image Python-loop variant: returns (images, has_nsfw_concepts).

        NOTE(review): obfuscation damage — the locals read below
        (`special_cos_dist`, `cos_dist`, `result`, `batch_size`, `result_img`,
        `adjustment`, `concept_cos`, `concept_threshold`, `has_nsfw_concepts`)
        are never bound because every assignment targets the placeholder
        `SCREAMING_SNAKE_CASE_`. Code kept byte-identical.
        """
        SCREAMING_SNAKE_CASE_ = self.vision_model(_A )[1] # pooled_output
        SCREAMING_SNAKE_CASE_ = self.visual_projection(_A )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        SCREAMING_SNAKE_CASE_ = cosine_distance(_A , self.special_care_embeds ).cpu().float().numpy()
        SCREAMING_SNAKE_CASE_ = cosine_distance(_A , self.concept_embeds ).cpu().float().numpy()
        SCREAMING_SNAKE_CASE_ = []
        SCREAMING_SNAKE_CASE_ = image_embeds.shape[0]
        for i in range(_A ):
            SCREAMING_SNAKE_CASE_ = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            SCREAMING_SNAKE_CASE_ = 0.0
            for concept_idx in range(len(special_cos_dist[0] ) ):
                SCREAMING_SNAKE_CASE_ = special_cos_dist[i][concept_idx]
                SCREAMING_SNAKE_CASE_ = self.special_care_embeds_weights[concept_idx].item()
                SCREAMING_SNAKE_CASE_ = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
                    # A special-care hit lowers the threshold for all concepts.
                    SCREAMING_SNAKE_CASE_ = 0.01
            for concept_idx in range(len(cos_dist[0] ) ):
                SCREAMING_SNAKE_CASE_ = cos_dist[i][concept_idx]
                SCREAMING_SNAKE_CASE_ = self.concept_embeds_weights[concept_idx].item()
                SCREAMING_SNAKE_CASE_ = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(_A )
            result.append(_A )
        SCREAMING_SNAKE_CASE_ = [len(res['''bad_concepts'''] ) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def _UpperCamelCase ( self , _A , _A ) -> List[Any]:
        """Vectorised (tensor-wise) variant of the NSFW check.

        NOTE(review): same obfuscation damage as above — the tensor locals read
        below are never actually bound.
        """
        SCREAMING_SNAKE_CASE_ = self.vision_model(_A )[1] # pooled_output
        SCREAMING_SNAKE_CASE_ = self.visual_projection(_A )
        SCREAMING_SNAKE_CASE_ = cosine_distance(_A , self.special_care_embeds )
        SCREAMING_SNAKE_CASE_ = cosine_distance(_A , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        SCREAMING_SNAKE_CASE_ = 0.0
        SCREAMING_SNAKE_CASE_ = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        SCREAMING_SNAKE_CASE_ = torch.any(special_scores > 0 , dim=1 )
        SCREAMING_SNAKE_CASE_ = special_care * 0.01
        SCREAMING_SNAKE_CASE_ = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
        SCREAMING_SNAKE_CASE_ = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        SCREAMING_SNAKE_CASE_ = torch.any(concept_scores > 0 , dim=1 )
        return images, has_nsfw_concepts
| 701 |
from pathlib import Path
import fire
def A__ ( src_dir, dest_dir, n ):
    """Copy every file under `src_dir` into `dest_dir`, keeping only the first
    `n` lines of each file (each line right-stripped).

    Fixes the obfuscated original, whose three parameters shared one duplicated
    name (a SyntaxError) and whose locals were never bound.
    """
    src_path = Path(src_dir )
    dest_path = Path(dest_dir )
    dest_path.mkdir(exist_ok=True )
    for path in src_path.iterdir():
        # Keep the first n lines, stripped of trailing whitespace.
        new = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_file = dest_path.joinpath(path.name )
        print(dest_file )
        dest_file.open('''w''' ).write('''\n'''.join(new ) )


if __name__ == "__main__":
    # CLI: python minify.py SRC_DIR DEST_DIR N
    fire.Fire(A__)  # fixed: the original referenced the undefined name `minify`
| 597 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure: maps submodule name -> public names it defines.
# The obfuscated original assigned every optional extension to a fresh
# `lowercase_` variable (losing the dict entirely) and then referenced the
# undefined `_import_structure`; this restores the canonical transformers
# lazy-module pattern, including installing the `_LazyModule` in sys.modules.
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Image processing requires the vision extras (PIL).
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    # Replace this module with a proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] ):
return getitem, k
def snake_case_ ( k : Any , v : Any ):
    """Build a (setitem, key, value) operation tuple for the hash-map tests.

    Fixes the obfuscated original, which declared two parameters with the same
    name (a SyntaxError) and returned the unbound names `k` and `v`.
    """
    return setitem, k, v
def snake_case_ ( lowerCAmelCase_ : List[Any] ):
    """Build a (delitem, key) operation tuple for the hash-map tests.

    Fixes the obfuscated original, which returned the unbound name `k`.
    """
    return delitem, lowerCAmelCase_
def snake_case_ ( obj , fun , *args ):
    """Apply `fun(obj, *args)` and report the outcome without raising.

    Returns (result, None) on success, or (None, exception) when `fun` raises.
    Fixes the obfuscated original, whose three parameters shared one duplicated
    name (a SyntaxError) and which referenced the unbound name `fun`.
    """
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
# Pre-built operation sequences fed to the parametrized hash-map test below.
# NOTE(review): obfuscation damage — the helper factories are all named
# `snake_case_`, so the `_set` / `_get` / `_del` calls below are unbound, and
# every sequence is assigned to the same `lowerCamelCase` name (each clobbers
# the previous), so `_add_items` etc. referenced later are unbound too.
lowerCamelCase : List[str] = (
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
)
lowerCamelCase : Any = [
    _set('''key_a''', '''val_a'''),
    _set('''key_a''', '''val_b'''),
]
lowerCamelCase : Optional[int] = [
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
    _del('''key_a'''),
    _del('''key_b'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
]
lowerCamelCase : Optional[int] = [
    _get('''key_a'''),
    _del('''key_a'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
    _del('''key_a'''),
    _get('''key_a'''),
]
lowerCamelCase : Dict = [
    *[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCamelCase : Optional[int] = [
    *[_set(x, x) for x in range(5)], # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
    """operations""" , (
        pytest.param(_add_items , id="""add items""" ),
        pytest.param(_overwrite_items , id="""overwrite items""" ),
        pytest.param(_delete_items , id="""delete items""" ),
        pytest.param(_access_absent_items , id="""access absent items""" ),
        pytest.param(_add_with_resize_up , id="""add with resize up""" ),
        pytest.param(_add_with_resize_down , id="""add with resize down""" ),
    ) , )
def snake_case_ ( lowerCAmelCase_ : Any ):
    """Run each operation sequence against a HashMap and a plain dict and assert
    both behave identically (results, repr, keys, length, items).

    NOTE(review): obfuscation damage — `my`/`py` were renamed to the placeholder
    `__lowercase`, and `_run_operation`, `my_res`, `py_res` are unbound below.
    Code kept byte-identical.
    """
    __lowercase : Tuple = HashMap(initial_block_size=4 )
    __lowercase : Union[str, Any] = {}
    for _, (fun, *args) in enumerate(lowerCAmelCase_ ):
        __lowercase , __lowercase : Tuple = _run_operation(lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ )
        __lowercase , __lowercase : int = _run_operation(lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ )
        assert my_res == py_res
        assert str(lowerCAmelCase_ ) == str(lowerCAmelCase_ )
        assert set(lowerCAmelCase_ ) == set(lowerCAmelCase_ )
        assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )
        assert set(my.items() ) == set(py.items() )
def snake_case_ ( ):
    """Check that HashMap exposes no public names beyond those of a plain dict.

    Fixes the obfuscated original, whose inner predicate ignored its argument
    (reading the unbound `name`), passed the wrong variable into the set
    comprehensions, and asserted on two unbound names.
    """
    def is_public(name : str ) -> bool:
        # Names without a leading underscore are part of the public API.
        return not name.startswith("""_""" )

    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    # dict's public surface must be a strict superset of HashMap's.
    assert dict_public_names > hash_public_names
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
# NOTE(review): obfuscation damage — all three constants share the name
# `_UpperCamelCase`, so only the last assignment (".") survives; upstream these
# are TRANSFORMERS_PATH, PATH_TO_DOCS and REPO_PATH.
_UpperCamelCase : List[Any] = "src/transformers"
_UpperCamelCase : List[str] = "docs/source/en"
_UpperCamelCase : List[Any] = "."
def a_ ( filename, start_prompt, end_prompt ):
    """Extract the region of `filename` between `start_prompt` and `end_prompt`.

    Returns (text, start_index, end_index, lines) where
    lines[start_index:end_index] is the extracted region with blank lines
    trimmed at both edges.

    Fixes the obfuscated original, which declared three parameters under one
    duplicated name (a SyntaxError) and never bound `lines`, `start_index` or
    `end_index`.

    Raises:
        IndexError: if either prompt is not found in the file.
    """
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    # Find the end prompt.
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    # Trim blank lines at both edges of the region.
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
# NOTE(review): obfuscation damage — every constant below shares the name
# `_UpperCamelCase`, so each assignment clobbers the previous; upstream these
# are _re_tf_models / _re_flax_models / _re_pt_models / transformers_module,
# which later code references as unbound names.
_UpperCamelCase : List[str] = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_UpperCamelCase : str = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_UpperCamelCase : Tuple = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_UpperCamelCase : Union[str, Any] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
_UpperCamelCase : Union[str, Any] = direct_transformers_import(TRANSFORMERS_PATH)
def a_ ( identifier ):
    """Split a CamelCase identifier into its words.

    Runs of consecutive capitals stay together until a lowercase letter starts
    a new word (e.g. "TFBertModel" -> ["TF", "Bert", "Model"]).

    Fixes the obfuscated original, which assigned the match iterator to a
    placeholder and then read the unbound name `matches`.
    """
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , identifier )
    return [m.group(0 ) for m in matches]
def a_ ( text, width ):
    """Center `text` in a field of `width` characters.

    The ✅ / ❌ emoji render two columns wide, so they are counted as length 2.
    Any leftover odd space goes on the right.

    Fixes the obfuscated original, which declared both parameters under one
    duplicated name (a SyntaxError) and never bound the locals it read.
    """
    text_length = 2 if text == '✅' or text == '❌' else len(text )
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def a_ ( ):
    """Build the markdown table of per-model slow/fast-tokenizer and
    PT/TF/Flax support used in the docs index.

    NOTE(review): obfuscation damage — every assignment targets the local
    `lowercase__`, so the names read below (`config_maping_names`,
    `model_name_to_config`, `model_name_to_prefix`, the five defaultdicts,
    `lookup_dict`, `attr_name`, `model_names`, `columns`, `widths`, `table`,
    `check`, `prefix` …) and the regex/module constants
    (`_re_tf_models`, `transformers_module`) are unbound. Code kept
    byte-identical.
    """
    lowercase__ : Tuple = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    lowercase__ : Union[str, Any] = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    lowercase__ : int = {name: config.replace('Config' , '' ) for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    lowercase__ : List[str] = collections.defaultdict(_lowerCAmelCase )
    lowercase__ : Optional[Any] = collections.defaultdict(_lowerCAmelCase )
    lowercase__ : Dict = collections.defaultdict(_lowerCAmelCase )
    lowercase__ : Optional[Any] = collections.defaultdict(_lowerCAmelCase )
    lowercase__ : Optional[Any] = collections.defaultdict(_lowerCAmelCase )
    # Let's lookup through all transformers object (once).
    for attr_name in dir(_lowerCAmelCase ):
        lowercase__ : List[Any] = None
        # Classify each exported name by suffix/prefix into the right table.
        if attr_name.endswith('Tokenizer' ):
            lowercase__ : List[Any] = slow_tokenizers
            lowercase__ : Any = attr_name[:-9]
        elif attr_name.endswith('TokenizerFast' ):
            lowercase__ : Any = fast_tokenizers
            lowercase__ : Tuple = attr_name[:-13]
        elif _re_tf_models.match(_lowerCAmelCase ) is not None:
            lowercase__ : Optional[int] = tf_models
            lowercase__ : str = _re_tf_models.match(_lowerCAmelCase ).groups()[0]
        elif _re_flax_models.match(_lowerCAmelCase ) is not None:
            lowercase__ : Union[str, Any] = flax_models
            lowercase__ : Any = _re_flax_models.match(_lowerCAmelCase ).groups()[0]
        elif _re_pt_models.match(_lowerCAmelCase ) is not None:
            lowercase__ : Union[str, Any] = pt_models
            lowercase__ : Dict = _re_pt_models.match(_lowerCAmelCase ).groups()[0]
        if lookup_dict is not None:
            while len(_lowerCAmelCase ) > 0:
                if attr_name in model_name_to_prefix.values():
                    lowercase__ : Union[str, Any] = True
                    break
                # Try again after removing the last word in the name
                lowercase__ : str = ''.join(camel_case_split(_lowerCAmelCase )[:-1] )
    # Let's build that table!
    lowercase__ : str = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    lowercase__ : str = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    lowercase__ : Dict = [len(_lowerCAmelCase ) + 2 for c in columns]
    lowercase__ : List[str] = max([len(_lowerCAmelCase ) for name in model_names] ) + 2
    # Build the table per se
    lowercase__ : Optional[int] = '|' + '|'.join([_center_text(_lowerCAmelCase , _lowerCAmelCase ) for c, w in zip(_lowerCAmelCase , _lowerCAmelCase )] ) + '|\n'
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths] ) + "|\n"
    lowercase__ : Tuple = {True: '✅', False: '❌'}
    for name in model_names:
        lowercase__ : Tuple = model_name_to_prefix[name]
        lowercase__ : Optional[int] = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(_lowerCAmelCase , _lowerCAmelCase ) for l, w in zip(_lowerCAmelCase , _lowerCAmelCase )] ) + "|\n"
    return table
def check_model_table(overwrite: bool = False):
    """Check that the model table in ``index.md`` is up to date; optionally rewrite it.

    NOTE(review): the obfuscated original reused its single parameter both as the
    overwrite flag and as the docs directory; the directory should come from the
    module-level ``PATH_TO_DOCS`` constant (defined earlier in this file) — confirm.

    Args:
        overwrite: If True, rewrite a stale table in place instead of raising.

    Raises:
        ValueError: if the table is stale and ``overwrite`` is False.
    """
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, 'index.md'),
        start_prompt='<!--This table is updated automatically from the auto modules',
        end_prompt='<!-- End table-->',
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            # Splice the regenerated table between the two prompts.
            with open(os.path.join(PATH_TO_DOCS, 'index.md'), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.' )
if __name__ == "__main__":
    # Parse the single CLI flag and run the model-table check.
    # NOTE(review): the parser/args objects are bound to `_UpperCamelCase`, yet the
    # code below reads `parser` and `args`, and it calls `check_model_table` while
    # the function above is named `a_` — this block cannot run as written; the
    # names appear to have been mangled. Confirm and restore.
    _UpperCamelCase : List[str] = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    _UpperCamelCase : str = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
| 645 | """simple docstring"""
from __future__ import annotations
def a_ ( stress: float , tangential_force: float , area: float , ):
    """Solve for the missing quantity in the shear-stress relation stress = force / area.

    Exactly one of the three arguments must be 0; that quantity is computed from
    the other two and returned as a ``(name, value)`` tuple.

    Note: the original signature declared the same placeholder name for all three
    parameters (a SyntaxError); the real names are grounded by the body below.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is negative.
    """
    if (stress, tangential_force, area).count(0 ) != 1:
        raise ValueError('You cannot supply more or less than 2 values' )
    elif stress < 0:
        raise ValueError('Stress cannot be negative' )
    elif tangential_force < 0:
        raise ValueError('Tangential Force cannot be negative' )
    elif area < 0:
        raise ValueError('Area cannot be negative' )
    elif stress == 0:
        # stress = F / A
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        # F = stress * A
        return (
            "tangential_force",
            stress * area,
        )
    else:
        # A = F / stress
        return (
            "area",
            tangential_force / stress,
        )
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 645 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Union[str, Any] =logging.get_logger(__name__)
__snake_case :List[Any] ={
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class lowerCAmelCase__ ( _lowerCamelCase ):
    """Configuration for NLLB-MoE models; stores architecture and routing hyper-parameters."""

    # NOTE(review): attribute names restored per the standard NLLB-MoE config —
    # the obfuscated source bound all three class attributes to one placeholder.
    model_type = 'nllb-moe'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=128_112,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ) -> None:
        """Store every hyper-parameter on ``self`` and validate ``router_dtype``.

        NOTE(review): the obfuscated signature reused one name for every parameter
        (a SyntaxError) and bound each value to a throwaway local instead of
        ``self``; names and assignments restored per the standard NLLB-MoE config.
        """
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCamelCase_ :
    """A minimal dense matrix supporting +, -, *, transpose and the Sherman-Morrison update.

    NOTE(review): three methods were all obfuscated to one name (``__A``) so each
    shadowed the previous; they are restored to the names the class itself calls
    (``validate_indicies`` in ``__getitem__``/``__setitem__``, ``transpose`` and
    ``sherman_morrison`` in the demo code below the class).
    """

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a ``row`` x ``column`` matrix filled with ``default_value``."""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        """Render the matrix with right-aligned, equal-width cells."""
        s = f'Matrix consist of {self.row} rows and {self.column} columns\n'

        # Make string identifier: width of the widest element.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f'%{max_element_length}s'

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """Return True iff ``loc`` is a 2-tuple/list of in-range indices."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        """Element-wise sum; shapes must match."""
        assert isinstance(another, UpperCamelCase_)
        assert self.row == another.row and self.column == another.column
        # Add
        result = UpperCamelCase_(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = UpperCamelCase_(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        """Scalar multiplication for int/float, matrix product for Matrix operands."""
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = UpperCamelCase_(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, UpperCamelCase_):  # Matrix multiplication
            assert self.column == another.row
            result = UpperCamelCase_(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'Unsupported type given for another ({type(another)})'
            raise TypeError(msg)

    def transpose(self):
        """Return a new matrix that is the transpose of this one."""
        result = UpperCamelCase_(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Sherman-Morrison update: ``self`` is A^(-1); return (A + u v^T)^(-1).

        Returns None when the rank-one update makes the matrix singular.
        """
        assert isinstance(u, UpperCamelCase_) and isinstance(v, UpperCamelCase_)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable

        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Demonstrate the Sherman-Morrison update starting from the identity matrix.

        NOTE(review): the obfuscated original collapsed ``ainv``/``u``/``v`` into a
        single throwaway name, leaving the print statements referencing undefined
        variables; reconstructed to match those print statements.
        """
        # a^(-1) is the 3x3 identity matrix
        ainv = UpperCamelCase_(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f'a^(-1) is {ainv}')
        # u, v
        u = UpperCamelCase_(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = UpperCamelCase_(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'u is {u}')
        print(f'v is {v}')
        print(f'uv^T is {u * v.transpose()}')
        # Sherman Morrison
        print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}')

    def testa() -> None:
        """Run the module doctests (the call below expects this name)."""
        import doctest
        doctest.testmod()

    testa()
def _A ( __magic_name__ , __magic_name__ ):
# Check if the input is valid
if not len(__magic_name__ ) == len(__magic_name__ ) == 3:
raise ValueError("Please enter a valid equation." )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("Both a & b of two equations can't be zero." )
# Extract the coefficients
lowercase__ , lowercase__ , lowercase__ = equationa
lowercase__ , lowercase__ , lowercase__ = equationa
# Calculate the determinants of the matrices
lowercase__ = aa * ba - aa * ba
lowercase__ = ca * ba - ca * ba
lowercase__ = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
lowercase__ = determinant_x / determinant
lowercase__ = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 611 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowerCAmelCase ( lowercase_ ):
    """ConvBERT model configuration; stores the architecture hyper-parameters."""

    # NOTE(review): attribute name restored per the standard ConvBERT config.
    model_type = 'convbert'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store every hyper-parameter on ``self``.

        NOTE(review): the obfuscated signature reused one placeholder for every
        parameter (a SyntaxError) and bound values to throwaway locals; names and
        ``self`` assignments restored per the standard ConvBERT config.
        """
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class lowerCAmelCase ( lowercase_ ):
    # NOTE(review): this class shares its (obfuscated) name with the config class
    # above, so it shadows it at module level — confirm the intended distinct names.
    @property
    def UpperCAmelCase ( self :Optional[Any] ):
        """Return the ONNX input spec: input name -> {axis index: symbolic axis name}.

        Fix: the axis dict was bound to a throwaway local while the OrderedDict
        below referenced an undefined ``dynamic_axis``.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
def _lowerCamelCase ( __A : str ) -> str:
_UpperCAmelCase : List[Any] = 0
# if input_string is "aba" than new_input_string become "a|b|a"
_UpperCAmelCase : Optional[int] = ''''''
_UpperCAmelCase : Union[str, Any] = ''''''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__A ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
_UpperCAmelCase , _UpperCAmelCase : Tuple = 0, 0
# length[i] shows the length of palindromic substring with center i
_UpperCAmelCase : Optional[int] = [1 for i in range(len(__A ) )]
# for each character in new_string find corresponding palindromic string
_UpperCAmelCase : Dict = 0
for j in range(len(__A ) ):
_UpperCAmelCase : List[str] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__A )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_UpperCAmelCase : List[str] = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
_UpperCAmelCase : List[Any] = j - k + 1 # noqa: E741
_UpperCAmelCase : Any = j + k - 1
# update max_length and start position
if max_length < length[j]:
_UpperCAmelCase : Optional[int] = length[j]
_UpperCAmelCase : Dict = j
# create that string
_UpperCAmelCase : Optional[int] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 485 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class A_ ( __lowercase ):
    """Deprecated alias kept for backward compatibility with ``PerceiverFeatureExtractor``."""

    def __init__(self, *args, **kwargs) -> None:
        """Warn about the deprecation, then defer to the parent image processor.

        Fixes: the obfuscated signature declared ``*_A, **_A`` (a duplicate
        parameter name, a SyntaxError) and passed the kwargs placeholder as the
        warning category; ``FutureWarning`` is the conventional category for
        deprecation shims like this one.
        """
        warnings.warn(
            '''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PerceiverImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs)
| 485 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase =logging.get_logger(__name__)
UpperCAmelCase ={
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class lowerCamelCase__ ( __lowercase ):
    """GPT-Neo model configuration; stores the architecture hyper-parameters."""

    # NOTE(review): attribute names restored per the standard GPT-Neo config —
    # the obfuscated source bound all three to one placeholder name.
    model_type = '''gpt_neo'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}

    def __init__(
        self,
        vocab_size=50_257,
        max_position_embeddings=2_048,
        hidden_size=2_048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ) -> None:
        """Store every hyper-parameter on ``self`` and expand the attention layout.

        NOTE(review): the obfuscated signature reused one placeholder for every
        parameter (a SyntaxError) and assigned values to throwaway locals; names
        and ``self`` assignments restored per the standard GPT-Neo config.
        """
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        # One attention type must be configured for each layer.
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.attention_layers)` == `config.num_layers` """
                f'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
                f'`config.num_layers = {self.num_layers}`. '
                """`config.attention_layers` is prepared using `config.attention_types`. """
                """Please verify the value of `config.attention_types` argument.""" )

        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

    @staticmethod
    def expand_attention_types_params(attention_types):
        """Expand ``[[types, count], ...]`` into a flat per-layer list of attention types.

        (Name grounded by the ``self.expand_attention_types_params`` call above.)
        """
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def _A ( _a : Optional[int] , _a : List[str] , _a : Optional[int] , _a : Any ):
"""simple docstring"""
import torch
A = input.size()
A = len(_lowerCamelCase )
A = shape[dimension]
A = torch.arange(0 , _lowerCamelCase , _lowerCamelCase )
A = torch.div(sizedim - size , _lowerCamelCase , rounding_mode="""floor""" ) + 1
A = torch.arange(_lowerCamelCase ) + low_indices[:min_length][:, None]
A = [slice(_lowerCamelCase )] * rank
A = indices
A = input[s]
A = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(_lowerCamelCase )
def _A ( _a : Tuple , _a : Optional[Any] ):
"""simple docstring"""
import torch
A = torch.arange(1 , _lowerCamelCase )
A = torch.remainder(_lowerCamelCase , _lowerCamelCase )
A = remainders == 0
A = candidates[divisor_indices]
A = torch.max(_lowerCamelCase )
return largest_divisor, torch.div(_lowerCamelCase , _lowerCamelCase , rounding_mode="""floor""" )
class lowerCamelCase__ ( __lowercase ):
    """ONNX export configuration for GPT-Neo (supports past key/values).

    NOTE(review): the four members below all shared one obfuscated name, so each
    redefinition shadowed the previous one. ``generate_dummy_inputs`` is grounded
    by its own ``super().generate_dummy_inputs`` call and ``num_attention_heads``
    by its use in the past-shape below; the two remaining names follow the
    standard OnnxConfigWithPast convention — confirm.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Input name -> {axis index: symbolic axis name} mapping for export."""
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="""inputs""" )
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None) -> Mapping[str, Any]:
        """Build ordered dummy inputs (optionally with zeroed past key/values)."""
        common_inputs = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers )
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask to cover the extra past positions.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch, past_key_values_length, dtype=mask_dtype )], dim=1 )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 1_3
| 706 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase =logging.get_logger(__name__)
UpperCAmelCase ={
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
    """UMT5 model configuration; stores the architecture hyper-parameters."""

    # NOTE(review): attribute names restored per the standard UMT5 config.
    model_type = '''umt5'''
    keys_to_ignore_at_inference = ['''past_key_values''']

    def __init__(
        self,
        vocab_size=250_112,
        d_model=512,
        d_kv=64,
        d_ff=1_024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ) -> None:
        """Store every hyper-parameter on ``self`` and resolve the activation spec.

        NOTE(review): the obfuscated signature reused one placeholder for every
        parameter (a SyntaxError) and bound values to throwaway locals so later
        references (``act_info``) were undefined; restored per the standard
        UMT5 config.
        """
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("""-""" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == """gated"""
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""" )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = """gelu_new"""

    @property
    def hidden_size(self):
        # NOTE(review): property names restored per the standard UMT5 config.
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
    """ONNX export configuration for UMT5 (seq2seq, with optional past key/values).

    NOTE(review): the three members below shared one obfuscated name (each
    shadowing the previous); the first two names are grounded by their own
    "Copied from …T5OnnxConfig.inputs / .default_onnx_opset" comments, the third
    follows the T5/UMT5 ONNX convention — confirm.
    """

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Input name -> {axis index: symbolic axis name} mapping for export."""
        common_inputs = {
            """input_ids""": {0: """batch""", 1: """encoder_sequence"""},
            """attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = """past_encoder_sequence + sequence"""
            common_inputs["decoder_input_ids"] = {0: """batch"""}
            common_inputs["decoder_attention_mask"] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        else:
            common_inputs["decoder_input_ids"] = {0: """batch""", 1: """decoder_sequence"""}
            common_inputs["decoder_attention_mask"] = {0: """batch""", 1: """decoder_sequence"""}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="""inputs""" )

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 1_3

    @property
    def atol_for_validation(self) -> float:
        return 5E-4
| 255 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available

# Lazy-import structure: submodule name -> list of public names.
# NOTE(review): restored — the obfuscated source bound the structure dict and
# the fast-tokenizer list to throwaway names, leaving the `_import_structure`
# referenced by `_LazyModule` below undefined.
_import_structure = {'tokenization_herbert': ['HerbertTokenizer']}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_herbert_fast'] = ['HerbertTokenizerFast']


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available

# Lazy-import structure: submodule name -> list of public names.
# NOTE(review): restored — the obfuscated source rebound one throwaway name for
# every list (so only the last assignment survived) and the `_import_structure`
# referenced by `_LazyModule` below was never defined.
_import_structure = {
    """configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        """GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GPTNeoForCausalLM""",
        """GPTNeoForQuestionAnswering""",
        """GPTNeoForSequenceClassification""",
        """GPTNeoForTokenClassification""",
        """GPTNeoModel""",
        """GPTNeoPreTrainedModel""",
        """load_tf_weights_in_gpt_neo""",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        """FlaxGPTNeoForCausalLM""",
        """FlaxGPTNeoModel""",
        """FlaxGPTNeoPreTrainedModel""",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel


else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 0 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort the list of integers ``a`` in place using pigeonhole sort.

    Name restored from the call in ``main`` below (the obfuscated definition used
    a placeholder name, leaving that call unresolved).

    Raises:
        AssertionError: if any element is not an int.
    """
    if not a:
        # Nothing to sort; also avoids min()/max() on an empty sequence.
        return
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    """Demo: sort a small list and print it (name grounded by the call below)."""
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    # str.join needs strings, so convert the sorted integers first.
    print('Sorted order is:', ' '.join(str(n) for n in a))


if __name__ == "__main__":
    main()
| 721 |
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """Query the GitHub API for self-hosted runners and fail if a target runner is offline.

    Name restored from the call in the ``__main__`` block below. The obfuscated
    body also collapsed every local into one placeholder, leaving ``status``,
    ``runners`` and ``offline_runners`` undefined; restored.

    Args:
        target_runners: runner names to check.
        token: GitHub token with ``actions:read`` permission.

    Raises:
        ValueError: listing the offline runners, if any are found.
    """
    offline_runners = []
    cmd = (
        F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
        ' https://api.github.com/repos/huggingface/transformers/actions/runners'
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode('utf-8')
    status = json.loads(o)

    runners = status['runners']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open('offline_runners.txt', 'w') as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = '\n'.join([x['name'] for x in offline_runners])
        raise ValueError(F"""The following runners are offline:\n{failed}""")


if __name__ == "__main__":

    def list_str(values):
        """argparse helper: split a comma-separated string into a list (name
        grounded by the ``type=list_str`` reference below)."""
        return values.split(',')

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--target_runners""",
        default=None,
        type=list_str,
        required=True,
        help="""Comma-separated list of runners to check status.""",
    )

    parser.add_argument(
        """--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
| 45 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Lazy-import structure: submodule name -> list of public names.
# NOTE(review): restored — the obfuscated source rebound one throwaway name for
# every list (so earlier assignments were clobbered) and the
# `_import_structure` referenced by `_LazyModule` below was never defined.
_import_structure = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["""DeiTFeatureExtractor"""]
    _import_structure["image_processing_deit"] = ["""DeiTImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        """DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """DeiTForImageClassification""",
        """DeiTForImageClassificationWithTeacher""",
        """DeiTForMaskedImageModeling""",
        """DeiTModel""",
        """DeiTPreTrainedModel""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        """TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFDeiTForImageClassification""",
        """TFDeiTForImageClassificationWithTeacher""",
        """TFDeiTForMaskedImageModeling""",
        """TFDeiTModel""",
        """TFDeiTPreTrainedModel""",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )


else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 177 |
"""simple docstring"""
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Find the largest-magnitude eigenvalue and eigenvector by power iteration.

    ``input_matrix`` must be square and, if complex, Hermitian; ``vector`` is a
    starting vector of matching length. Iterates until the Rayleigh quotient
    stabilizes within ``error_tol`` or ``max_iterations`` is reached.

    Name restored from the call in ``test_power_iteration`` below; the obfuscated
    signature declared all four parameters with one name (a SyntaxError) and
    collapsed every local into one placeholder.

    Returns:
        (largest eigenvalue, corresponding unit eigenvector)
    """
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration() -> None:
    """Check power_iteration against numpy's eigh on a real and a complex Hermitian matrix."""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    # Build a Hermitian complex matrix from the real one.
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation: eigh works for symmetric/Hermitian matrices
        # and returns eigenvalues in ascending order.
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column is the eigenvector of the largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Compare absolute values: eigenvectors are unique only up to sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
| 438 | 0 |
from math import factorial

# Precomputed factorial of each decimal digit, keyed by the digit as a string
# so digits of a number can be looked up directly from str(n).
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the decimal digits of `n`."""
    return sum(DIGIT_FACTORIAL[digit] for digit in str(n))
def solution() -> int:
    """
    Project Euler 34: sum of all numbers equal to the sum of the factorials
    of their digits (1 and 2 are excluded as non-sums).

    An upper bound: a number with d digits is at most d * 9!, which falls
    below 10**(d-1) once d > 7, so 7 * 9! + 1 bounds the search.
    """
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(f"{solution() = }")
| 143 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImgaImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for the IF image-to-image super-resolution pipeline."""

    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        # Shared super-resolution dummy components from IFPipelineTesterMixin.
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-bound generators, so seed globally there.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        return {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 143 | 1 |
from ..utils import DummyObject, requires_backends
# NOTE(review): in the upstream `transformers` dummy-object modules each class below has
# a distinct public name mirroring a real pipeline/model class; those names were lost in
# this copy, so all six definitions still share one name and only the last one survives
# at import time — TODO restore the original class names.
class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises a helpful error unless torch, transformers and onnx are installed."""

    # Read by DummyObject.__getattr__ to report the missing backends.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises a helpful error unless torch, transformers and onnx are installed."""

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises a helpful error unless torch, transformers and onnx are installed."""

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises a helpful error unless torch, transformers and onnx are installed."""

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises a helpful error unless torch, transformers and onnx are installed."""

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises a helpful error unless torch, transformers and onnx are installed."""

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 188 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    """Parse TensorFlow benchmark arguments from the command line and run the benchmark."""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        # Translate the deprecated `--no_<flag>` spelling into a helpful message.
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval of the parser's error text is inherited behavior; the
        # string comes from argparse, not untrusted input, but it is still fragile.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()


if __name__ == "__main__":
    main()
| 188 | 1 |
"""simple docstring"""
def solution() -> int:
    """
    Project Euler 9: find the Pythagorean triplet a < b < c with
    a + b + c = 1000 and return the product a * b * c.

    >>> solution()
    31875000
    """
    # `next` stops at the first (unique) triplet instead of building a full list.
    return next(
        a * b * (1_000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1_000 - a - b) ** 2)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
| 716 | """simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    """
    M-CTC-T feature extractor: converts raw speech into log-mel filterbank
    (MFSC) features, with optional per-utterance mean/variance normalization.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        """
        Args:
            feature_size: number of mel filterbank channels.
            sampling_rate: expected audio sampling rate in Hz.
            padding_value: value used to pad shorter sequences.
            hop_length / win_length: STFT stride and window size in milliseconds.
            win_function: name of the window, e.g. "hamming_window".
            frame_signal_scale: constant the waveform is scaled by before the STFT.
            preemphasis_coeff: pre-emphasis filter coefficient.
            mel_floor: minimum value of the mel filterbanks.
            normalize_means / normalize_vars: per-utterance CMVN switches.
            return_attention_mask: whether __call__ returns an attention mask.
        """
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        # Convert window/stride from milliseconds to samples.
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array):
        """Extract log-mel filterbank features for one waveform; returns (frames, feature_size)."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        # spectrogram returns (freq, frames); transpose to time-major.
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        """Apply CMVN over the valid `input_length` frames and re-fill padding."""
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            # Restore the padding region, which the mean-subtraction shifted.
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None):
        """Normalize a batch of features; lengths come from the mask when available."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        """
        Featurize and pad one or several sequence(s) of raw speech.

        Raises:
            ValueError: if `sampling_rate` is given and does not match the extractor's,
                or if multi-channel audio is passed.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            # Only use the mask for normalization when real padding was applied.
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 283 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
# Names of the vocabulary files this tokenizer reads/writes.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Download locations of the pretrained vocabularies, per checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length supported by each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

# Per-checkpoint tokenizer init overrides.
PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) LXMERT tokenizer — a BERT-style WordPiece tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the stored state disagrees with the
        # requested options (e.g. loading a cased checkpoint with do_lower_case=True).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model input as `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return token-type ids: 0s for the first sequence (+specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the WordPiece vocabulary to `save_directory`; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 588 |
'''simple docstring'''
import argparse
import os
import re
lowercase_ : Optional[Any] = '''src/transformers'''
# Pattern that looks at the indentation in a line.
lowercase_ : int = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase_ : List[str] = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase_ : Optional[int] = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase_ : List[Any] = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase_ : List[str] = re.compile(r'''\[([^\]]+)\]''')
def get_indent(line):
    """Return the leading whitespace of `line` (empty string if the line is blank)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """
    Split `code` into blocks delimited by lines at exactly `indent_level`.

    Args:
        code: source text to split.
        indent_level: indentation string marking block boundaries.
        start_prompt: if given, everything before the first line starting with it
            becomes the first block.
        end_prompt: if given, splitting stops at the first line starting with it;
            the remainder becomes a trailing block.

    Returns:
        List of block strings whose concatenation (with newlines) is `code`.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the file).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            # A line at the boundary indent closes the current block only if the
            # block's last line was more deeply indented (i.e. the body ended).
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap sort-key function `key` so comparison ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """
    Sort `objects` the way isort would: constants first, then classes, then
    functions, each group alphabetically ignoring case and underscores.
    `key` extracts the name from each object (identity by default).
    """

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """Return `import_statement` with the names inside its brackets sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        #     key: [
        #         "object1",
        #         "object2",
        #         ...
        #     ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        #     key: [
        #         "object1", "object2", ...
        #     ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        return _re_bracket_content.sub(_replace, import_statement)
def sort_imports(file, check_only=True):
    """
    Sort the `_import_structure` entries of one __init__.py.

    Returns True when the file would change and `check_only` is set; otherwise
    rewrites the file in place (or returns None if nothing to do).
    """
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0.
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks.
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend.
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block_sorted = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block_sorted)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every __init__.py under PATH_TO_TRANSFORMERS.

    Raises:
        ValueError: in check-only mode, if any file would be rewritten.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                # Accumulate every failing file so the error reports all of them.
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 588 | 1 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

# model-type -> feature-extractor class name mapping used by AutoFeatureExtractor.
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

# Lazy config-class -> feature-extractor-class mapping built from the names above.
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature-extractor class object from its string name.

    Searches the static name tables first, then dynamically registered
    extractors, and finally the top-level ``transformers`` module (which
    exposes dummy objects for missing backends so users get a helpful error).

    Args:
        class_name: e.g. ``"ViTFeatureExtractor"``.

    Returns:
        The class, or ``None`` if it cannot be found anywhere.
    """
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                # The name table may list a class the module doesn't define
                # (e.g. missing backend); keep looking.
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In
    # that case, the class will be in the main init and we return the proper
    # dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the feature extractor configuration of a pretrained checkpoint as a dict.

    Args:
        pretrained_model_name_or_path: Hub model id or local directory.
        cache_dir / force_download / resume_download / proxies / use_auth_token /
        revision / local_files_only: standard Hub download controls, forwarded
        to ``get_file_from_repo``.

    Returns:
        The parsed JSON config, or ``{}`` when no feature extractor config
        file exists for the checkpoint.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    """Generic factory that instantiates the right feature extractor for a checkpoint.

    Not meant to be constructed directly — use
    :meth:`AutoFeatureExtractor.from_pretrained`.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate a feature extractor class from a pretrained checkpoint.

        Resolution order: the ``feature_extractor_type`` / ``auto_map`` keys of
        the feature extractor config, then the model config, then the static
        FEATURE_EXTRACTOR_MAPPING keyed by config class.

        Raises:
            ValueError: when no feature extractor class can be resolved.
        """
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        # Tell downstream loaders this request comes from an Auto* class.
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor
        # config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            # Consumed by get_class_from_dynamic_module; must not leak to from_dict.
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new (config class, feature extractor class) pair in the mapping."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
# Module-level logger for this training script.
logger = logging.getLogger(__name__)
# Let tf.data pick parallelism/prefetch sizes automatically.
AUTO = tf.data.AUTOTUNE
def parse_args():
    """Parse the command-line flags for the TPU masked-LM training script."""
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    """Resolve, connect to, and initialize the TPU system.

    Args:
        args: parsed CLI namespace; reads ``tpu_name``, ``tpu_zone`` and
            ``gcp_project``.

    Returns:
        The connected ``TPUClusterResolver``.

    Raises:
        RuntimeError: when the TPU cluster cannot be resolved.
    """
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            # No name given: rely on the environment (e.g. Colab) to locate a TPU.
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    """Sum the per-shard sample counts encoded in tfrecord file names.

    Shard files are expected to be named ``<prefix>-<shard>-<count>.tfrecord``;
    the second number is the number of samples in that shard.
    """
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        num_samples += int(sample_count)

    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    """Build a tf.data pipeline over a list of tfrecord shard paths.

    Args:
        records: shard file paths (counts encoded in the names, see count_samples).
        decode_fn: maps a serialized example to a feature dict.
        mask_fn: maps a batch to a masked-LM batch (adds labels).
        batch_size: global batch size; remainder batches are dropped.
        shuffle: whether to shuffle both the shard order and the samples.
        shuffle_buffer_size: required when ``shuffle`` is True.
    """
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=tf.data.AUTOTUNE)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=tf.data.AUTOTUNE)
    if shuffle:
        assert shuffle_buffer_size is not None
        # NOTE: use the parameter, not the global `args`, so callers control it.
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=tf.data.AUTOTUNE)
    dataset = dataset.prefetch(tf.data.AUTOTUNE)
    return dataset
def main(args):
    """Train a masked language model on TPU (or a single CPU/GPU for debugging)."""
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    # The model's vocab size must match the tokenizer we are actually using.
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        # Parse one serialized tf.Example into fixed-length int64 features.
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    # Parse CLI flags and launch training.
    args = parse_args()
    main(args)
'''simple docstring'''
import os
def solution(grid=None):
    """Project Euler problem 11: greatest product of four adjacent numbers.

    Adjacency is horizontal, vertical, or diagonal (both directions).

    Args:
        grid: optional square grid of ints. When omitted, the 20x20 grid is
            read from ``grid.txt`` next to this file (the original behaviour).

    Returns:
        The maximum product of four adjacent entries.
    """
    if grid is None:
        with open(os.path.dirname(__file__) + "/grid.txt") as f:
            grid = [[int(x) for x in f.readline().split()] for _ in range(20)]

    n = len(grid)
    maximum = 0

    # right
    for i in range(n):
        for j in range(n - 3):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            maximum = max(maximum, temp)

    # down
    for i in range(n - 3):
        for j in range(n):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            maximum = max(maximum, temp)

    # diagonal down-right
    for i in range(n - 3):
        for j in range(n - 3):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            maximum = max(maximum, temp)

    # diagonal down-left
    for i in range(n - 3):
        for j in range(3, n):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            maximum = max(maximum, temp)

    return maximum
if __name__ == "__main__":
    print(solution())
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
# File name under which scheduler configs are saved/loaded; read below by the
# scheduler mixin as its `config_name`.
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class FlaxKarrasDiffusionSchedulers(Enum):
    """Closed set of Flax schedulers sharing the Karras-style interface.

    NOTE(review): reconstructed — the original base class name was undefined
    (Enum is the only plausible import in scope) and all five members shared a
    single duplicated identifier; member names follow the diffusers convention
    and should be confirmed against upstream.
    """

    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base output class for a scheduler's `step` result.

    Attributes:
        prev_sample: the computed sample for the previous timestep.
    """

    # NOTE(review): field name reconstructed — the original annotation was
    # mangled (`= 42`); `prev_sample` follows the diffusers convention.
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    """Mixin with functionality common to all Flax schedulers: config-based
    loading/saving and discovery of compatible scheduler classes."""

    config_name = SCHEDULER_CONFIG_NAME
    # `dtype` is a construction detail, not part of the persisted config.
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        """Instantiate a scheduler (and its state) from a saved configuration.

        Returns ``(scheduler, state)`` — plus the unused kwargs when
        ``return_unused_kwargs`` is True.
        """
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """Write this scheduler's configuration to ``save_directory``."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Scheduler classes whose configs are interchangeable with this one."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve the compatible class names (plus our own) against the
        # top-level package; names missing there (e.g. unavailable backends)
        # are silently skipped.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    """Broadcast ``x`` to ``shape`` by appending trailing singleton axes.

    Unlike plain broadcasting (which aligns trailing axes), this aligns the
    *leading* axes of ``x`` with ``shape``.
    """
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def UpperCamelCase__ ( _lowercase : int , _lowercase : List[str]=0.9_99 , _lowercase : List[Any]=jnp.floataa ) -> jnp.ndarray:
def alpha_bar(_lowercase : Optional[Any] ):
return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
__UpperCAmelCase: Tuple = []
for i in range(_lowercase ):
__UpperCAmelCase: Any = i / num_diffusion_timesteps
__UpperCAmelCase: Tuple = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_lowercase ) / alpha_bar(_lowercase ) , _lowercase ) )
return jnp.array(_lowercase , dtype=_lowercase )
@flax.struct.dataclass
class CommonSchedulerState:
    """Precomputed diffusion constants shared by the Flax schedulers."""

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        """Build the betas/alphas tables from a scheduler's config."""
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    """Return sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t), broadcast to the
    shape of ``original_samples`` for the given ``timesteps``."""
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    """Forward-diffuse: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    """v-prediction target: v = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * sample."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """Nearest-neighbour image rescaler.

    Maps every destination pixel back to its nearest source pixel by simple
    ratio scaling; the result is available in ``self.output`` after calling
    :meth:`process`.
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        # Source/destination scaling ratios per axis.
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # Output buffer, initialised to white (255) 3-channel pixels.
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        """Fill ``self.output`` with the nearest-neighbour resampled image."""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column to its nearest source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row to its nearest source row."""
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    # Load the demo image in colour (cv2 flag 1 = BGR).
    im = imread("image_data/lena.jpg", 1)

    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of ``nums[left:right + 1]`` by divide and conquer.

    Raises:
        ValueError: if ``nums`` is empty.
        IndexError: if ``left`` or ``right`` fall outside the list.
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod(verbose=True)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the PEGASUS-X model: real modules are only imported
# on first attribute access (or eagerly for static type checkers).
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: leave the modeling entries out of the lazy structure.
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that resolves the
    # names in _import_structure on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# NOTE(review): this module-level constant looks mangled — the value "true" is
# never read under this name in the file; it was presumably an environment
# setting (e.g. TOKENIZERS_PARALLELISM). Confirm against the upstream script.
lowercase__ = '''true'''
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Build a regression model/dataloader pair prepared for distributed use.

    Returns ``(model, ddp_model, dataloader)`` where ``model`` is the raw
    (unprepared) copy and ``ddp_model``/``dataloader`` went through
    ``accelerator.prepare``.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    """Build the tokenized GLUE/MRPC validation dataloader.

    Args:
        accelerator: used to serialize dataset preprocessing across processes.
        use_longest: pad each batch to its longest sequence instead of a
            fixed max_length of 128.
    """
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        # NOTE(review): truncation/max_length values reconstructed from the
        # mangled source — confirm against the upstream accelerate script.
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Build the MRPC model and dataloaders in both plain and prepared form.

    Returns a dict with a ``"no"`` (baseline) and ``"ddp"`` (prepared) triple of
    ``[model, dataloader, device]``, plus the accelerator.
    """
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run inference over ``dataloader`` and gather logits/targets across processes.

    Returns concatenated ``(logits, targets)`` tensors.
    """
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
        # Gather across processes, trimming duplicated samples from the last batch.
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    """Check that gather_for_metrics yields exactly ``num_samples`` predictions."""
    _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """Check that distributed metric computation matches the single-process baseline."""
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)

    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        # De-duplicate across processes before feeding the metric.
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    """Exercise gather_for_metrics across all batching configurations."""
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    # Script entry point (run directly or via `accelerate launch`).
    main()
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a :
    """Builds tiny DPT configurations and dummy inputs for the unit tests below.

    NOTE(review): this block is machine-degraded — ``__init__`` repeats the
    parameter name ``lowerCamelCase`` (a SyntaxError) and the assignments bind
    throwaway ``__snake_case`` locals instead of ``self`` attributes, while
    later methods read ``self.batch_size`` etc.  Preserved byte-for-byte;
    only documentation was added.
    """
    def __init__( self : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : List[str]=2 , lowerCamelCase : List[Any]=32 , lowerCamelCase : Dict=16 , lowerCamelCase : Union[str, Any]=3 , lowerCamelCase : Optional[int]=True , lowerCamelCase : List[str]=True , lowerCamelCase : Dict=32 , lowerCamelCase : Any=4 , lowerCamelCase : Tuple=[0, 1, 2, 3] , lowerCamelCase : Optional[int]=4 , lowerCamelCase : Dict=37 , lowerCamelCase : Optional[Any]="gelu" , lowerCamelCase : List[Any]=0.1 , lowerCamelCase : Tuple=0.1 , lowerCamelCase : Optional[int]=0.02 , lowerCamelCase : Optional[Any]=3 , lowerCamelCase : int=[1, 384, 24, 24] , lowerCamelCase : List[Any]=True , lowerCamelCase : Optional[int]=None , ) -> Tuple:
        __snake_case : Optional[Any] = parent
        __snake_case : List[Any] = batch_size
        __snake_case : Dict = image_size
        __snake_case : List[Any] = patch_size
        __snake_case : Any = num_channels
        __snake_case : List[str] = is_training
        __snake_case : Union[str, Any] = use_labels
        __snake_case : int = hidden_size
        __snake_case : Tuple = num_hidden_layers
        __snake_case : str = backbone_out_indices
        __snake_case : List[str] = num_attention_heads
        __snake_case : str = intermediate_size
        __snake_case : Dict = hidden_act
        __snake_case : List[Any] = hidden_dropout_prob
        __snake_case : Tuple = attention_probs_dropout_prob
        __snake_case : Union[str, Any] = initializer_range
        __snake_case : Any = num_labels
        __snake_case : Optional[int] = backbone_featmap_shape
        __snake_case : Dict = scope
        __snake_case : List[str] = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        __snake_case : Optional[int] = (image_size // patch_size) ** 2
        __snake_case : Union[str, Any] = num_patches + 1
    def __snake_case ( self : Any ) -> Dict:
        # Random pixel values plus (optionally) per-pixel segmentation labels.
        __snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __snake_case : int = None
        if self.use_labels:
            __snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        __snake_case : List[str] = self.get_config()
        return config, pixel_values, labels
    def __snake_case ( self : Union[str, Any] ) -> Dict:
        # Tiny BiT backbone config used by the hybrid DPT variant.
        __snake_case : Union[str, Any] = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=lowerCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
    def __snake_case ( self : Optional[Any] , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : Optional[Any] ) -> Tuple:
        # Shape check for the bare DPTModel forward pass.
        __snake_case : Optional[Any] = DPTModel(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __snake_case : str = model(lowerCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __snake_case ( self : str , lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Any ) -> Union[str, Any]:
        # Shape check for the depth-estimation head.
        __snake_case : Tuple = self.num_labels
        __snake_case : str = DPTForDepthEstimation(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __snake_case : Union[str, Any] = model(lowerCamelCase )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
    def __snake_case ( self : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] ) -> int:
        # Shape check for the semantic-segmentation head (logits per class).
        __snake_case : Optional[Any] = self.num_labels
        __snake_case : str = DPTForSemanticSegmentation(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __snake_case : int = model(lowerCamelCase , labels=lowerCamelCase )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def __snake_case ( self : List[str] ) -> str:
        # Repackage config/inputs into the (config, inputs_dict) pair the
        # common tests expect.
        __snake_case : List[str] = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case : Union[str, Any] = config_and_inputs
        __snake_case : List[Any] = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Common model tests for DPT (model, depth-estimation and segmentation heads).

    NOTE(review): degraded block — every class attribute is named
    ``__UpperCAmelCase`` (later assignments shadow earlier ones) and method
    parameters repeat ``lowerCamelCase``.  Preserved byte-for-byte; only
    documentation was added.
    """
    # all_model_classes / pipeline mapping / feature flags (names degraded).
    __UpperCAmelCase : List[Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    __UpperCAmelCase : int = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    __UpperCAmelCase : int = False
    __UpperCAmelCase : Tuple = False
    __UpperCAmelCase : Tuple = False
    def __snake_case ( self : Dict ) -> Dict:
        # setUp: build the model tester and config tester used below.
        __snake_case : int = DPTModelTester(self )
        __snake_case : Dict = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
    def __snake_case ( self : Optional[Any] ) -> Union[str, Any]:
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DPT does not use inputs_embeds" )
    def __snake_case ( self : str ) -> int:
        pass
    def __snake_case ( self : Any ) -> str:
        # Input embeddings should be a module; output embeddings absent or Linear.
        __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case : Union[str, Any] = model_class(lowerCamelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __snake_case : Dict = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
    def __snake_case ( self : Tuple ) -> List[str]:
        # forward() must accept pixel_values as its first argument.
        __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case : Any = model_class(lowerCamelCase )
            __snake_case : str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __snake_case : List[str] = [*signature.parameters.keys()]
            __snake_case : Optional[Any] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowerCamelCase )
    def __snake_case ( self : str ) -> Tuple:
        __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase )
    def __snake_case ( self : Any ) -> Optional[Any]:
        __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*lowerCamelCase )
    def __snake_case ( self : str ) -> Optional[Any]:
        __snake_case : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
    def __snake_case ( self : Optional[Any] ) -> Optional[Any]:
        # Training smoke test: loss must be backpropagatable.
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            __snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
            __snake_case : int = True
            if model_class in get_values(lowerCamelCase ):
                continue
            __snake_case : int = model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.train()
            __snake_case : Dict = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
            __snake_case : Dict = model(**lowerCamelCase ).loss
            loss.backward()
    def __snake_case ( self : str ) -> Optional[int]:
        # Same as above but with gradient checkpointing enabled.
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            __snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
            __snake_case : int = False
            __snake_case : Optional[int] = True
            if model_class in get_values(lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
                continue
            __snake_case : str = model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.gradient_checkpointing_enable()
            model.train()
            __snake_case : Optional[Any] = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
            __snake_case : str = model(**lowerCamelCase ).loss
            loss.backward()
    def __snake_case ( self : Dict ) -> Dict:
        # With zero-init config, all trainable params should be exactly 0 or 1,
        # except the pretrained hybrid backbone which is skipped.
        __snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __snake_case : Optional[int] = _config_zero_init(lowerCamelCase )
        for model_class in self.all_model_classes:
            __snake_case : int = model_class(config=lowerCamelCase )
            # Skip the check for the backbone
            __snake_case : Optional[Any] = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    __snake_case : Optional[Any] = [F'{name}.{key}' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def __snake_case ( self : Any ) -> Dict:
        pass
    @slow
    def __snake_case ( self : List[str] ) -> Any:
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            __snake_case : Any = DPTModel.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
    def __snake_case ( self : Optional[Any] ) -> str:
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        __snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        __snake_case : Optional[int] = "add"
        with self.assertRaises(lowerCamelCase ):
            __snake_case : Optional[Any] = DPTForDepthEstimation(lowerCamelCase )
def lowerCAmelCase_ ( ):
    """Load the standard COCO cats fixture image used by the slow tests.

    Returns:
        A ``PIL.Image.Image`` opened from the repository test fixtures.
    """
    # Bug fix: the original assigned the opened image to a throwaway local
    # and then returned the undefined name ``image`` (NameError).
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_torch
@require_vision
@slow
class a (unittest.TestCase ):
    """Slow integration test: Intel/dpt-hybrid-midas depth prediction on a real image.

    NOTE(review): degraded block — results are bound to throwaway
    ``__snake_case`` locals while later lines read ``predicted_depth`` /
    ``outputs``.  Preserved byte-for-byte; only documentation was added.
    """
    def __snake_case ( self : Optional[int] ) -> Tuple:
        # End-to-end: preprocess fixture image, run the model, compare a 3x3
        # slice of the predicted depth map against recorded reference values.
        __snake_case : Tuple = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
        __snake_case : List[str] = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(lowerCamelCase )
        __snake_case : List[str] = prepare_img()
        __snake_case : List[str] = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
        # forward pass
        with torch.no_grad():
            __snake_case : List[str] = model(**lowerCamelCase )
        __snake_case : Union[str, Any] = outputs.predicted_depth
        # verify the predicted depth
        __snake_case : List[str] = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , lowerCamelCase )
        __snake_case : List[str] = torch.tensor(
            [[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , lowerCamelCase , atol=1E-4 ) )
| 203 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Force deterministic cuDNN/cuBLAS kernels so pipeline outputs are reproducible.
enable_full_determinism()
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Fast CPU tests for ``StableDiffusionXLImgaImgPipeline`` with tiny components.

    NOTE(review): degraded block — class attributes all share the name
    ``__UpperCAmelCase`` and results are bound to throwaway ``__snake_case``
    locals while later lines read ``components`` / ``inputs`` / ``image`` etc.
    Preserved byte-for-byte; only documentation was added.
    """
    # pipeline class under test + parameter sets consumed by the common mixins.
    __UpperCAmelCase : Optional[Any] = StableDiffusionXLImgaImgPipeline
    __UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    __UpperCAmelCase : Dict = PipelineTesterMixin.required_optional_params - {"latents"}
    __UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __UpperCAmelCase : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
    __UpperCAmelCase : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def __snake_case ( self : Optional[Any] ) -> Tuple:
        # Build a miniature UNet/scheduler/VAE/CLIP stack seeded for determinism.
        torch.manual_seed(0 )
        __snake_case : Optional[int] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        __snake_case : Tuple = EulerDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
        torch.manual_seed(0 )
        __snake_case : int = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        __snake_case : Optional[int] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=32 , )
        __snake_case : List[str] = CLIPTextModel(lowerCamelCase )
        __snake_case : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=lowerCamelCase )
        __snake_case : List[str] = CLIPTextModelWithProjection(lowerCamelCase )
        __snake_case : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=lowerCamelCase )
        __snake_case : Optional[int] = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_a,
            "tokenizer_2": tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def __snake_case ( self : Optional[Any] , lowerCamelCase : List[Any] , lowerCamelCase : List[Any]=0 ) -> Union[str, Any]:
        # Deterministic dummy image + generator + standard call kwargs.
        __snake_case : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
        __snake_case : Any = image / 2 + 0.5
        if str(lowerCamelCase ).startswith("mps" ):
            __snake_case : Dict = torch.manual_seed(lowerCamelCase )
        else:
            __snake_case : int = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
        __snake_case : Optional[Any] = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def __snake_case ( self : Dict ) -> Any:
        # Smoke test: 2-step img2img run, compare a 3x3 output slice.
        __snake_case : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
        __snake_case : Any = self.get_dummy_components()
        __snake_case : int = StableDiffusionXLImgaImgPipeline(**lowerCamelCase )
        __snake_case : List[str] = sd_pipe.to(lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
        __snake_case : Tuple = self.get_dummy_inputs(lowerCamelCase )
        __snake_case : Dict = sd_pipe(**lowerCamelCase ).images
        __snake_case : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __snake_case : str = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def __snake_case ( self : str ) -> Optional[Any]:
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
    def __snake_case ( self : Any ) -> Dict:
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
    def __snake_case ( self : str ) -> Optional[int]:
        pass
    def __snake_case ( self : Tuple ) -> Union[str, Any]:
        # Passing prompt strings and passing precomputed prompt embeddings
        # must yield (numerically) identical images.
        __snake_case : str = self.get_dummy_components()
        __snake_case : List[Any] = StableDiffusionXLImgaImgPipeline(**lowerCamelCase )
        __snake_case : Optional[Any] = sd_pipe.to(lowerCamelCase )
        __snake_case : int = sd_pipe.to(lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
        # forward without prompt embeds
        __snake_case : List[str] = self.get_dummy_inputs(lowerCamelCase )
        __snake_case : str = 3 * ["this is a negative prompt"]
        __snake_case : Any = negative_prompt
        __snake_case : Optional[Any] = 3 * [inputs["prompt"]]
        __snake_case : int = sd_pipe(**lowerCamelCase )
        __snake_case : List[Any] = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        __snake_case : List[Any] = self.get_dummy_inputs(lowerCamelCase )
        __snake_case : Optional[Any] = 3 * ["this is a negative prompt"]
        __snake_case : int = 3 * [inputs.pop("prompt" )]
        (
            (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) ,
        ) : Dict = sd_pipe.encode_prompt(lowerCamelCase , negative_prompt=lowerCamelCase )
        __snake_case : Tuple = sd_pipe(
            **lowerCamelCase , prompt_embeds=lowerCamelCase , negative_prompt_embeds=lowerCamelCase , pooled_prompt_embeds=lowerCamelCase , negative_pooled_prompt_embeds=lowerCamelCase , )
        __snake_case : List[str] = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class a (unittest.TestCase ):
    """Slow GPU integration test for a full Stable Diffusion inference run.

    NOTE(review): degraded block — results are bound to throwaway
    ``__snake_case`` locals while later lines read ``latents`` / ``pipe`` /
    ``image`` etc.  Preserved byte-for-byte; only documentation was added.
    """
    def __snake_case ( self : Optional[int] ) -> Dict:
        # tearDown: release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __snake_case ( self : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Optional[Any]="cpu" , lowerCamelCase : str=torch.floataa , lowerCamelCase : int=0 ) -> Dict:
        # Seeded generator + fixed latents so outputs are reproducible.
        __snake_case : int = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
        __snake_case : Optional[Any] = np.random.RandomState(lowerCamelCase ).standard_normal((1, 4, 64, 64) )
        __snake_case : Optional[Any] = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase , dtype=lowerCamelCase )
        __snake_case : List[str] = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def __snake_case ( self : str ) -> Any:
        # 3-step run of SD 2 base; compare a 3x3 output slice to references.
        __snake_case : List[str] = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        __snake_case : int = self.get_inputs(lowerCamelCase )
        __snake_case : Optional[Any] = pipe(**lowerCamelCase ).images
        __snake_case : Any = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        __snake_case : Optional[int] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
        assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 203 | 1 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def SCREAMING_SNAKE_CASE ( point_a , point_b ) -> float:
    """Return the Euclidean distance between two equal-length vectors.

    Args:
        point_a: first vector (any iterable of numbers).
        point_b: second vector of the same length.

    Returns:
        ``sqrt(sum((a - b) ** 2))`` over paired coordinates.

    >>> SCREAMING_SNAKE_CASE([0, 0], [3, 4])
    5.0
    """
    # Bug fix: the original signature repeated one parameter name (a
    # SyntaxError) and the body referenced the undefined ``lowerCamelCase__``.
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(point_a, point_b)))
def _euclidean(point_a, point_b) -> float:
    """Euclidean distance helper for the nearest-neighbour search below."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(point_a, point_b)))
def SCREAMING_SNAKE_CASE ( dataset , value_array ) -> list[list[list[float] | float]]:
    """Find, for each query vector, its nearest neighbour in ``dataset``.

    Args:
        dataset: 2-D ``numpy`` array of candidate vectors (one per row).
        value_array: 2-D ``numpy`` array of query vectors (one per row).

    Returns:
        One ``[nearest_vector_as_list, euclidean_distance]`` pair per query,
        in query order.  Ties keep the earliest dataset row.

    Raises:
        ValueError: if the arrays differ in dimensionality or row width.
        TypeError: if the arrays have different dtypes (or a shape probe
            fails on mismatched ranks).
    """
    # Bug fix: the original signature repeated one parameter name (a
    # SyntaxError), called the undefined name ``euclidean`` and raised with
    # the undefined ``lowerCamelCase__``.  Validation order and messages are
    # kept from the original.
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        # 1-D inputs have no second axis; mismatched ranks were rejected
        # above, so this defensive re-check mirrors the original control flow.
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        # Seed with the first dataset row, then keep the strictly closer one.
        dist = _euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = _euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def SCREAMING_SNAKE_CASE ( input_a , input_b ) -> float:
    """Return the cosine similarity of two vectors.

    Computed as ``dot(a, b) / (|a| * |b|)``; undefined (division by zero)
    for zero-length vectors, matching the original behaviour.
    """
    # Bug fix: the original signature repeated one parameter name (a
    # SyntaxError) and the body referenced the undefined ``lowerCamelCase__``.
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 33 |
"""simple docstring"""
def _lowerCAmelCase ( lowerCamelCase__ : int = 1_0_0 ) -> int:
    """Project Euler 29: count distinct values of ``a**b`` for ``2 <= a, b <= n``.

    Args:
        lowerCamelCase__: inclusive upper bound ``n`` for both base and exponent.

    Returns:
        The number of distinct integer powers generated.
    """
    # Bug fix: the original referenced the undefined names ``n`` and
    # ``collect_powers``, added the argument instead of the computed power,
    # and returned from inside the inner loop after one iteration.
    limit = lowerCamelCase__ + 1  # maximum limit; range() excludes the bound
    # A set de-duplicates collisions such as 2**4 == 4**2 automatically.
    collect_powers = {a**b for a in range(2, limit) for b in range(2, limit)}
    return len(collect_powers)
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
| 572 | 0 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
# Module-level logger for the deprecation shim below.
snake_case = logging.get_logger(__name__)
class A_ ( UpperCAmelCase ):
    """Deprecated drop-in alias for ``PerceiverImageProcessor``.

    Instantiating it emits a ``FutureWarning`` and otherwise forwards
    construction unchanged to the parent class.
    """
    def __init__(self, *args, **kwargs) -> None:
        """Warn about the deprecation, then delegate to the parent ``__init__``."""
        # Bug fix: the degraded signature reused the name ``__A`` for both
        # *args and **kwargs (a SyntaxError) and passed it as the warning
        # category; upstream uses an explicit FutureWarning.  Stray dataset
        # residue ("| 721 |") on the last line was removed.
        warnings.warn(
            'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
# Module-level logger for the deprecation shim below.
snake_case = logging.get_logger(__name__)
class A_ ( UpperCAmelCase ):
    """Deprecated drop-in alias for ``DonutImageProcessor``.

    Instantiating it emits a ``FutureWarning`` and otherwise forwards
    construction unchanged to the parent class.
    """
    def __init__(self, *args, **kwargs) -> None:
        """Warn about the deprecation, then delegate to the parent ``__init__``."""
        # Bug fix: the degraded signature reused the name ``__A`` for both
        # *args and **kwargs (a SyntaxError) and passed it as the warning
        # category; upstream uses an explicit FutureWarning.  Stray dataset
        # residue ("| 535 | 0 |") on the last line was removed.
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
# Emit INFO-level progress while converting checkpoints.
logging.set_verbosity_info()
__snake_case : Tuple = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE(review): degraded code — the list is bound to ``__snake_case`` while
# the loop appends to ``rename_keys`` (a NameError at import time in this
# file as written).  Preserved byte-for-byte; only comments were added.
__snake_case : List[str] = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
    )
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
    )
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
    )
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
    )
    rename_keys.append(
        (
            F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
            F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
        )
    )
    rename_keys.append(
        (
            F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
            F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
        )
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ('input_proj.weight', 'input_projection.weight'),
        ('input_proj.bias', 'input_projection.bias'),
        ('query_embed.weight', 'query_position_embeddings.weight'),
        ('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
        ('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
        ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
        ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
        ('class_embed.weight', 'class_labels_classifier.weight'),
        ('class_embed.bias', 'class_labels_classifier.bias'),
        ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
        ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
        ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
        ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
        ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
        ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
    ]
)
def __lowerCamelCase ( state_dict , old , new ) -> None:
    """Rename key ``old`` to ``new`` inside ``state_dict`` (mutates in place).

    Args:
        state_dict: the checkpoint dictionary being converted.
        old: existing key to remove (raises ``KeyError`` if absent).
        new: key under which the popped value is re-inserted.
    """
    # Bug fix: the degraded signature repeated one parameter name (a
    # SyntaxError) and the popped value was never written back.
    value = state_dict.pop(old)
    state_dict[new] = value
def __lowerCamelCase ( state_dict ) -> Any:
    """Return a copy of ``state_dict`` with timm backbone keys renamed to HF naming.

    Keys containing ``"backbone.0.body"`` are re-prefixed with
    ``"backbone.conv_encoder.model"``; all other keys are kept as-is.
    Insertion order is preserved via ``OrderedDict``.
    """
    # Bug fix: the degraded version assigned both the renamed key and the
    # value to throwaway locals and returned an empty OrderedDict.
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
        new_state_dict[key] = value
    return new_state_dict
def __lowerCamelCase ( state_dict ) -> None:
    """Split each fused attention ``in_proj`` weight/bias into q/k/v entries.

    PyTorch's ``nn.MultiheadAttention`` packs the query/key/value projections
    into one ``in_proj_weight`` of shape ``(3 * hidden, hidden)`` — query rows
    first, then key, then value (hidden = 256 here).  The HF model uses
    separate ``q_proj``/``k_proj``/``v_proj`` modules, so each fused tensor is
    popped and re-inserted under the split names.  Mutates ``state_dict``.

    NOTE(review): the degraded original assigned the slices to throwaway
    locals; the target keys below are reconstructed from the upstream DETR
    conversion script — confirm against a real checkpoint.
    """
    prefix = ""
    # first: transformer encoder (self-attention only)
    for i in range(6):
        # read in weights + bias of the fused input projection layer
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (self-attention + cross-attention)
    for i in range(6):
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # cross-attention uses the same fused layout under "multihead_attn"
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def __lowerCamelCase ( image , checkpoint_url ) -> Any:
    """Resize ``image`` so its longest side matches the model's expected size.

    Detection checkpoints expect 800 px on the longest side; table-structure
    checkpoints expect 1000 px.  The aspect ratio is preserved.

    Args:
        image: a PIL-style image exposing ``.size`` and ``.resize``.
        checkpoint_url: used only to detect the checkpoint flavour.
    """
    # Bug fix: the degraded signature repeated one parameter name (a
    # SyntaxError) and referenced the collapsed locals inconsistently.
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    return image.resize((int(round(scale * width)), int(round(scale * height))))
def __lowerCamelCase ( image ) -> Any:
    """Convert a PIL image to a tensor and apply ImageNet normalization.

    Args:
        image: a PIL-style image.

    Returns:
        A normalized CHW tensor (via ``torchvision.transforms.functional``).
    """
    # Bug fix: the degraded version bound both the tensor and the normalized
    # result to throwaway locals and returned the raw input ``image``.
    tensor = F.to_tensor(image)
    # mean/std are the standard ImageNet statistics.
    return F.normalize(tensor, mean=[0.4_85, 0.4_56, 0.4_06], std=[0.2_29, 0.2_24, 0.2_25])
@torch.no_grad()
def __lowerCamelCase(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Table Transformer checkpoint to the HuggingFace format.

    Downloads the checkpoint from `checkpoint_url`, renames/reshapes its weights,
    verifies the converted model against known expected outputs, and optionally
    saves it to `pytorch_dump_folder_path` and/or pushes it to the hub.

    NOTE: the original block declared three parameters all named `__snake_case`
    (a SyntaxError) and bound every local to `A__`; names restored from usage.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the
    # head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: "table", 1: "table rotated"}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1_000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on a sample image
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor(
            [[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]]
        )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor(
            [[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]]
        )

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    # NOTE: the original bound the parser/args to throwaway names (`__snake_case`)
    # and called an undefined `convert_table_transformer_checkpoint`; restored to
    # call the conversion function defined above.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_url',
        default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
        type=str,
        choices=[
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
        ],
        help='URL of the Table Transformer checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    __lowerCamelCase(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 215 |
'''simple docstring'''
# Universal gas constant R. Unit - J mol-1 K-1
UNIVERSAL_GAS_CONSTANT = 8.314462


def __lowerCamelCase(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure of an ideal gas: P = nRT / V.

    Raises:
        ValueError: if moles or kelvin is negative, or volume is not positive
            (a zero volume previously crashed with ZeroDivisionError).
    """
    if moles < 0 or kelvin < 0 or volume <= 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def __lowerCamelCase(moles: float, kelvin: float, pressure: float) -> float:  # noqa: F811
    """Return the volume of an ideal gas: V = nRT / P.

    NOTE(review): this definition shadows the pressure function above because
    both carry the same (machine-generated) name; renaming would change the
    module's public surface, so it is only flagged here.

    Raises:
        ValueError: if moles or kelvin is negative, or pressure is not positive.
    """
    if moles < 0 or kelvin < 0 or pressure <= 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 215 | 1 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
# Template URL for fetching community pipelines from the diffusers GitHub repo.
# NOTE: the original bound these to throwaway names; the rest of the module
# references COMMUNITY_PIPELINES_URL and `logger`, restored here.
COMMUNITY_PIPELINES_URL = (
    '''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def UpperCAmelCase_():
    """Return every `diffusers` release published on PyPI, sorted ascending by version."""
    url = '''https://pypi.org/pypi/diffusers/json'''
    # `releases` keys are the version strings published on PyPI.
    releases = json.loads(request.urlopen(url).read())['''releases'''].keys()
    return sorted(releases, key=lambda v: version.Version(v))
def UpperCAmelCase_():
    """Make HF_MODULES_CACHE importable: add it to sys.path and create it with an __init__.py.

    Idempotent: returns immediately if the cache directory is already on sys.path.
    """
    # NOTE: the original referenced an undefined `_A` inside this zero-argument
    # function; every call site below uses the module-level HF_MODULES_CACHE.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def UpperCAmelCase_(name):
    """Create the dynamic-module package `name` under HF_MODULES_CACHE (recursively).

    Args:
        name: module path (str or Path) relative to HF_MODULES_CACHE.
    """
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def UpperCAmelCase_(module_file):
    """Return the unique relative-import module names found in `module_file`.

    Matches both `import .xxx` and `from .xxx import yyy` forms.
    """
    with open(module_file, '''r''', encoding='''utf-8''') as f:
        content = f.read()
    # NOTE: the original ran the regexes on the *path* instead of the file
    # contents; also use raw strings so the escapes are regex, not str, escapes.
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def UpperCAmelCase_(module_file):
    """Return every file transitively referenced by relative imports from `module_file`.

    Repeatedly expands relative imports until a pass discovers nothing new.
    """
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        # Drop files already discovered in a previous pass.
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [F'''{f}.py''' for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def UpperCAmelCase_(filename):
    """Check that every top-level package imported by `filename` is installed.

    Raises:
        ImportError: listing the missing packages, if any.

    Returns:
        The file's relative imports (see get_relative_imports).
    """
    with open(filename, '''r''', encoding='''utf-8''') as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module of absolute (non-relative) imports.
    imports = [imp.split('''.''')[0] for imp in imports if not imp.startswith('''.''')]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            '''This modeling file requires the following packages that were not found in your environment: '''
            F'''{", ".join(missing_packages)}. Run `pip install {" ".join(missing_packages)}`''')
    return get_relative_imports(filename)
def UpperCAmelCase_(class_name, module_path):
    """Import `module_path` (a path-like dotted module) and return `class_name` from it.

    When `class_name` is None, autodetect the pipeline class instead.

    NOTE: the original declared two parameters both named `_A` — a SyntaxError;
    names restored from how the body uses them.
    """
    module_path = module_path.replace(os.path.sep, '''.''')
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def UpperCAmelCase_(loaded_module):
    """Return the single custom DiffusionPipeline subclass defined in `loaded_module`.

    Skips the DiffusionPipeline base class itself and any class that originates
    from the `diffusers` package; raises if more than one candidate is found.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split('''.''')[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
                    F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
                    F''' {loaded_module}.''')
            pipeline_class = cls
    return pipeline_class
def UpperCAmelCase_(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    """Download (or locate) `module_file` for a dynamic pipeline and cache it as an importable module.

    The file can come from a local path, the diffusers GitHub community folder
    (single-segment name), or a model repo on the Hub; it is copied, together
    with its relative imports, under the dynamic-modules cache.

    Returns:
        The cached module path (relative to HF_MODULES_CACHE) joined with `module_file`.

    NOTE: the original signature repeated the parameter name `_A` nine times
    (a SyntaxError); names restored following the upstream diffusers API.
    """
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = '''local'''
    elif pretrained_model_name_or_path.count('''/''') == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = '''v''' + '''.'''.join(__version__.split('''.''')[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else '''main'''
            logger.info(F'''Defaulting to latest_version: {revision}.''')
        elif revision in available_versions:
            revision = F'''v{revision}'''
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                F'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
                F''' {", ".join(available_versions + ["main"])}.''')
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = '''git'''
            module_file = pretrained_model_name_or_path + '''.py'''
        except EnvironmentError:
            logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''')
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join('''local''', '''--'''.join(pretrained_model_name_or_path.split('''/''')))
        except EnvironmentError:
            logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''')
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = F'''{module_needed}.py'''
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    F'''{module_needed}.py''',
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def UpperCAmelCase_(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Fetch a dynamic module and return `class_name` (or the autodetected pipeline class) from it.

    NOTE: the original signature repeated the parameter name `_A` — a SyntaxError;
    names restored following the upstream diffusers API.
    """
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace('''.py''', ''''''))
| 721 |
def UpperCAmelCase_(source_data):
    """Transpose row-oriented `source_data` into per-column float lists.

    Args:
        source_data: iterable of rows; every element must be convertible to float.

    Returns:
        A list with one list per column, e.g. [[r1c1, r2c1], [r1c2, r2c2]].
    """
    data_lists = []
    for data in source_data:
        for i, el in enumerate(data):
            # Grow the column list lazily the first time column `i` is seen.
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def UpperCAmelCase_(data_lists, weights):
    """Min-max normalize each column; weight 0 inverts the score (1 - normalized).

    Args:
        data_lists: per-column value lists (see get_data).
        weights: one 0/1 weight per column — 1 keeps the normalized score,
            0 inverts it; anything else raises.

    Returns:
        Per-column score lists in [0, 1]; a constant column scores all-1 (weight 0)
        or all-0 (weight 1) because min == max.

    Raises:
        ValueError: if a weight is neither 0 nor 1.
    """
    score_lists = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = F'''Invalid weight of {weight:f} provided'''
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def UpperCAmelCase_(score_lists):
    """Sum the per-column score lists element-wise into one final score per row.

    Assumes all lists in `score_lists` have the same length as the first one.
    """
    final_scores = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def UpperCAmelCase_(source_data, weights):
    """Compute a weighted percentual-proximity score and append it to each source row.

    Args:
        source_data: list of rows (mutated in place — each row gains its score).
        weights: one 0/1 weight per column.

    Returns:
        `source_data` with the final score appended to every row.
    """
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
| 472 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class UpperCAmelCase_ ( unittest.TestCase ):
    """Config/inputs factory for the Flax RoFormer model tests.

    NOTE(review): this class looks mechanically renamed — `__init__` repeats the
    parameter name `UpperCamelCase_` (invalid Python) and binds every value to a
    throwaway local `__lowercase` instead of `self.<attr>`, so the attributes the
    other methods read (self.batch_size, self.vocab_size, ...) are never set.
    The second `_lowerCamelCase` also shadows the first and calls an undefined
    `self.prepare_config_and_inputs()`. Restore the upstream names before use.
    """

    def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=16 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=4 , ) -> Any:
        # NOTE(review): each line below should be `self.<name> = <param>` per the
        # attribute reads in the methods; left byte-identical for review.
        __lowercase : List[Any] = parent
        __lowercase : Any = batch_size
        __lowercase : Dict = seq_length
        __lowercase : List[Any] = is_training
        __lowercase : Any = use_attention_mask
        __lowercase : Union[str, Any] = use_token_type_ids
        __lowercase : int = use_labels
        __lowercase : List[Any] = vocab_size
        __lowercase : Union[str, Any] = hidden_size
        __lowercase : Optional[Any] = num_hidden_layers
        __lowercase : Union[str, Any] = num_attention_heads
        __lowercase : Dict = intermediate_size
        __lowercase : Tuple = hidden_act
        __lowercase : Any = hidden_dropout_prob
        __lowercase : Any = attention_probs_dropout_prob
        __lowercase : List[Any] = max_position_embeddings
        __lowercase : Optional[int] = type_vocab_size
        __lowercase : Any = type_sequence_label_size
        __lowercase : List[Any] = initializer_range
        __lowercase : Union[str, Any] = num_choices

    # Builds (config, input_ids, token_type_ids, attention_mask) for a test run.
    def _lowerCamelCase ( self ) -> int:
        __lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowercase : str = None
        if self.use_attention_mask:
            __lowercase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
        __lowercase : Tuple = None
        if self.use_token_type_ids:
            __lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __lowercase : Any = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    # NOTE(review): shadows the method above (same name); upstream this was
    # `prepare_config_and_inputs_for_common`.
    def _lowerCamelCase ( self ) -> Dict:
        __lowercase : Dict = self.prepare_config_and_inputs()
        __lowercase ,__lowercase ,__lowercase ,__lowercase : List[Any] = config_and_inputs
        __lowercase : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( snake_case , unittest.TestCase ):
    """Common-model-test suite for the Flax RoFormer classes.

    NOTE(review): both class attributes share the name `UpperCamelCase`, so the
    second assignment (the model-class tuple, upstream `all_model_classes`)
    overwrites the first (a boolean flag). The methods below also bind results
    to throwaway `__lowercase` locals and reference names (`model`,
    `UpperCamelCase_`) that are never defined — mechanical renaming damage.
    """

    UpperCamelCase =True
    UpperCamelCase =(
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    # setUp: instantiate the tester helper (upstream: self.model_tester = ...).
    def _lowerCamelCase ( self ) -> Tuple:
        __lowercase : int = FlaxRoFormerModelTester(self )

    # Smoke-test: each model class loads from the hub and runs a forward pass.
    @slow
    def _lowerCamelCase ( self ) -> Tuple:
        for model_class_name in self.all_model_classes:
            __lowercase : Optional[Any] = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=UpperCamelCase_ )
            __lowercase : Dict = model(np.ones((1, 1) ) )
            self.assertIsNotNone(UpperCamelCase_ )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
    """Integration test: masked-LM forward pass against recorded reference logits.

    NOTE(review): locals are bound to throwaway `__lowercase` names while later
    lines reference the originals (`model`, `output`, `vocab_size`,
    `UpperCamelCase_`) — this test cannot run as written.
    """

    @slow
    def _lowerCamelCase ( self ) -> List[Any]:
        __lowercase : Tuple = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
        __lowercase : str = jnp.array([[0, 1, 2, 3, 4, 5]] )
        __lowercase : List[Any] = model(UpperCamelCase_ )[0]
        __lowercase : Optional[int] = 5_00_00
        __lowercase : Union[str, Any] = (1, 6, vocab_size)
        self.assertEqual(output.shape , UpperCamelCase_ )
        # Reference slice of the expected logits for the first three positions.
        __lowercase : List[Any] = jnp.array(
            [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 76 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# Paths for the flip-augmentation script; fill in before running.
# NOTE: the original bound all four values to the same throwaway name
# (`__lowerCAmelCase`); the functions below read LABEL_DIR/IMG_DIR/OUTPUT_DIR/FLIP_TYPE.
LABEL_DIR = """"""
IMG_DIR = """"""
OUTPUT_DIR = """"""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def UpperCAmelCase_():
    """Flip every labelled image in LABEL_DIR/IMG_DIR and write results to OUTPUT_DIR.

    For each flipped image a random 32-char suffix is appended to the file name
    and a matching YOLO-format .txt annotation file is written.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print('Processing...')
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(3_2)
        file_name = paths[index].split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        cva.imwrite(f"""/{file_root}.jpg""", image, [cva.IMWRITE_JPEG_QUALITY, 8_5])
        print(f"""Success {index+1}/{len(new_images)} with {file_name}""")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
            annos_list.append(obj)
        with open(f"""/{file_root}.txt""", 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))
def UpperCAmelCase_(label_dir: str, img_dir: str):
    """Collect image paths and YOLO annotations from a label directory.

    Args:
        label_dir: directory containing one `<name>.txt` label file per image.
        img_dir: directory the corresponding `<name>.jpg` files live in
            (existence is not verified — only the path is built).

    Returns:
        (img_paths, labels) where labels[i] is a list of
        [class_id, x_center, y_center, width, height] boxes; images whose label
        file is empty are skipped.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"""{label_name}.jpg""")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def UpperCAmelCase_(img_list: list, anno_list: list, flip_type: int = 1):
    """Flip each image and mirror its YOLO boxes accordingly.

    Args:
        img_list: image file paths.
        anno_list: per-image list of [class, x_center, y_center, w, h] boxes.
        flip_type: 1 flips horizontally (x mirrored), 0 vertically (y mirrored).

    Returns:
        (new_images, new_annotation_lists, paths) in the input order.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_image = cva.flip(img, flip_type)
            for bbox in img_annos:
                # Horizontal flip mirrors the x-center around 0.5.
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_image = cva.flip(img, flip_type)
            for bbox in img_annos:
                # Vertical flip mirrors the y-center around 0.5.
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_image)
    return new_imgs_list, new_annos_lists, path_list
def UpperCAmelCase_(number_char: int = 3_2) -> str:
    """Return a random string of `number_char` lowercase letters and digits.

    Note: uses `random`, so the result is not cryptographically secure.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module (the functions above are
    # all named `UpperCAmelCase_`), so this entry point raises NameError as written.
    main()
    print("""DONE ✅""")
| 229 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# NOTE: the original bound the import structure and its torch additions to
# throwaway names and dropped the final `sys.modules` assignment; restored to
# the standard transformers lazy-module pattern.
_import_structure = {
    'configuration_instructblip': [
        'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'InstructBlipConfig',
        'InstructBlipQFormerConfig',
        'InstructBlipVisionConfig',
    ],
    'processing_instructblip': ['InstructBlipProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_instructblip'] = [
        'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'InstructBlipQFormerModel',
        'InstructBlipPreTrainedModel',
        'InstructBlipForConditionalGeneration',
        'InstructBlipVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy loader so heavy deps import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 500 |
"""simple docstring"""
def _snake_case(num: int) -> int:
    """Return num! computed iteratively (num! = 1 for num <= 0)."""
    # NOTE: the original's parameter/locals were scrambled (`UpperCAmelCase_`,
    # `A__`) while the body read `num`/`fact`; names restored.
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact
def _snake_case(number: int) -> int:
    """Return the sum of the decimal digits of a non-negative integer."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def _snake_case(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20).

    NOTE: the original called undefined helpers (`factorial`, `split_and_add` —
    the functions above are all named `_snake_case` and shadow one another), so
    it raised NameError; computed directly here with the same result.
    """
    import math  # local import keeps this module's top-level imports unchanged

    return sum(int(digit) for digit in str(math.factorial(num)))
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module (the functions above
    # are all named `_snake_case`), so this entry point raises NameError as written.
    print(solution(int(input('Enter the Number: ').strip())))
| 500 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.