| code (string, lengths 82–53.2k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__)
# TODO Update this
SCREAMING_SNAKE_CASE :Dict = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig ):
'''simple docstring'''
model_type = "esm"
def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=10_26 , initializer_range=0.02 , layer_norm_eps=1E-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs ):
super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.emb_layer_norm_before = emb_layer_norm_before
self.token_dropout = token_dropout
self.is_folding_model = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
esmfold_config = EsmFoldConfig()
elif isinstance(esmfold_config , dict ):
esmfold_config = EsmFoldConfig(**esmfold_config )
self.esmfold_config = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
self.vocab_list = get_default_vocab_list()
else:
self.vocab_list = vocab_list
else:
self.esmfold_config = None
self.vocab_list = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , False ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def to_dict( self ):
output = super().to_dict()
if isinstance(self.esmfold_config , EsmFoldConfig ):
output["esmfold_config"] = self.esmfold_config.to_dict()
return output
@dataclass
class EsmFoldConfig:
'''simple docstring'''
esm_type: str = None
fp16_esm: bool = True
use_esm_attn_map: bool = False
esm_ablate_pairwise: bool = False
esm_ablate_sequence: bool = False
esm_input_dropout: float = 0
embed_aa: bool = True
bypass_lm: bool = False
lddt_head_hid_dim: int = 128
trunk: "TrunkConfig" = None
def __post_init__( self ):
if self.trunk is None:
self.trunk = TrunkConfig()
elif isinstance(self.trunk , dict ):
self.trunk = TrunkConfig(**self.trunk )
def to_dict( self ):
output = asdict(self )
output["trunk"] = self.trunk.to_dict()
return output
@dataclass
class TrunkConfig:
'''simple docstring'''
num_blocks: int = 48
sequence_state_dim: int = 1024
pairwise_state_dim: int = 128
sequence_head_width: int = 32
pairwise_head_width: int = 32
position_bins: int = 32
dropout: float = 0
layer_drop: float = 0
cpu_grad_checkpoint: bool = False
max_recycles: int = 4
chunk_size: int = 128
structure_module: "StructureModuleConfig" = None
def __post_init__( self ):
if self.structure_module is None:
self.structure_module = StructureModuleConfig()
elif isinstance(self.structure_module , dict ):
self.structure_module = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
f''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
f''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should be smaller than 0.4, got {self.dropout}.''' )
def to_dict( self ):
output = asdict(self )
output["structure_module"] = self.structure_module.to_dict()
return output
@dataclass
class StructureModuleConfig:
'''simple docstring'''
sequence_dim: int = 384
pairwise_dim: int = 128
ipa_dim: int = 16
resnet_dim: int = 128
num_heads_ipa: int = 12
num_qk_points: int = 4
num_v_points: int = 8
dropout_rate: float = 0.1
num_blocks: int = 8
num_transition_layers: int = 1
num_resnet_blocks: int = 2
num_angles: int = 7
trans_scale_factor: int = 10
epsilon: float = 1E-8
inf: float = 1E5
def to_dict( self ):
return asdict(self )
def get_default_vocab_list() -> tuple:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| code_codestyle: 55 |
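The `TrunkConfig` checks in the sample above boil down to one invariant: each state dimension must factor exactly into a head count times a head width. A minimal standalone sketch of that rule, using the dataclass defaults shown above (1024 and 32); the variable names are illustrative:

```python
# Illustrative check of the invariant TrunkConfig enforces.
sequence_state_dim = 1024   # default from the dataclass above
sequence_head_width = 32    # default from the dataclass above

assert sequence_state_dim % sequence_head_width == 0
sequence_num_heads = sequence_state_dim // sequence_head_width  # -> 32 heads
assert sequence_state_dim == sequence_num_heads * sequence_head_width
```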
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
"""Entropy of softmax(x), computed directly from the logits."""
exp_x = torch.exp(x)
A = torch.sum(exp_x , dim=1) # sum of exp(x_i)
B = torch.sum(x * exp_x , dim=1) # sum of x_i * exp(x_i)
return torch.log(A) - B / A
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self , config ):
"""simple docstring"""
super().__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList([BertLayer(config ) for _ in range(config.num_hidden_layers )] )
self.highway = nn.ModuleList([BertHighway(config ) for _ in range(config.num_hidden_layers )] )
self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers )]
def set_early_exit_entropy( self , x ):
"""simple docstring"""
if (type(x ) is float) or (type(x ) is int):
for i in range(len(self.early_exit_entropy ) ):
self.early_exit_entropy[i] = x
else:
self.early_exit_entropy = x
def init_highway_pooler( self , pooler ):
"""simple docstring"""
loaded_model = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : List[Any]=None , ):
"""simple docstring"""
UpperCamelCase = ()
UpperCamelCase = ()
UpperCamelCase = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
UpperCamelCase = all_hidden_states + (hidden_states,)
UpperCamelCase = layer_module(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , head_mask[i] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = layer_outputs[0]
if self.output_attentions:
UpperCamelCase = all_attentions + (layer_outputs[1],)
UpperCamelCase = (hidden_states,)
if self.output_hidden_states:
UpperCamelCase = current_outputs + (all_hidden_states,)
if self.output_attentions:
UpperCamelCase = current_outputs + (all_attentions,)
UpperCamelCase = self.highway[i](_SCREAMING_SNAKE_CASE )
# logits, pooled_output
if not self.training:
UpperCamelCase = highway_exit[0]
UpperCamelCase = entropy(_SCREAMING_SNAKE_CASE )
UpperCamelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
UpperCamelCase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
UpperCamelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_SCREAMING_SNAKE_CASE , i + 1 )
else:
UpperCamelCase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
UpperCamelCase = all_hidden_states + (hidden_states,)
UpperCamelCase = (hidden_states,)
if self.output_hidden_states:
UpperCamelCase = outputs + (all_hidden_states,)
if self.output_attentions:
UpperCamelCase = outputs + (all_attentions,)
UpperCamelCase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , __snake_case , )
class A__ ( __snake_case ):
'''simple docstring'''
def __init__( self , config ):
"""simple docstring"""
super().__init__(config )
self.config = config
self.embeddings = BertEmbeddings(config )
self.encoder = DeeBertEncoder(config )
self.pooler = BertPooler(config )
self.init_weights()
def _SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
self.encoder.init_highway_pooler(self.pooler )
def _SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return self.embeddings.word_embeddings
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
UpperCamelCase = value
def _SCREAMING_SNAKE_CASE ( self : List[str] , _SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_SCREAMING_SNAKE_CASE )
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Dict=None , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
UpperCamelCase = input_ids.size()
elif inputs_embeds is not None:
UpperCamelCase = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
UpperCamelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
UpperCamelCase = torch.ones(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
if encoder_attention_mask is None:
UpperCamelCase = torch.ones(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
if token_type_ids is None:
UpperCamelCase = torch.zeros(_SCREAMING_SNAKE_CASE , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
UpperCamelCase = self.get_extended_attention_mask(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
UpperCamelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
UpperCamelCase = encoder_attention_mask[:, None, None, :]
UpperCamelCase = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
UpperCamelCase = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
UpperCamelCase = self.get_head_mask(_SCREAMING_SNAKE_CASE , self.config.num_hidden_layers )
UpperCamelCase = self.embeddings(
input_ids=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , inputs_embeds=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.encoder(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , )
UpperCamelCase = encoder_outputs[0]
UpperCamelCase = self.pooler(_SCREAMING_SNAKE_CASE )
UpperCamelCase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class A__ ( __snake_case ):
'''simple docstring'''
def __init__( self , message , exit_layer ):
"""simple docstring"""
self.message = message
self.exit_layer = exit_layer # start from 1!
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self , config ):
"""simple docstring"""
super().__init__()
self.pooler = BertPooler(config )
self.dropout = nn.Dropout(config.hidden_dropout_prob )
self.classifier = nn.Linear(config.hidden_size , config.num_labels )
def forward( self , encoder_outputs ):
"""simple docstring"""
pooler_input = encoder_outputs[0]
pooler_output = self.pooler(pooler_input )
# "return" pooler_output
# BertModel
bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
pooled_output = bmodel_output[1]
pooled_output = self.dropout(pooled_output )
logits = self.classifier(pooled_output )
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """ , __snake_case , )
class A__ ( __snake_case ):
'''simple docstring'''
def __init__( self , config ):
"""simple docstring"""
super().__init__(config )
self.num_labels = config.num_labels
self.num_layers = config.num_hidden_layers
self.bert = DeeBertModel(config )
self.dropout = nn.Dropout(config.hidden_dropout_prob )
self.classifier = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : str , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : str=-1 , _SCREAMING_SNAKE_CASE : str=False , ):
"""simple docstring"""
UpperCamelCase = self.num_layers
try:
UpperCamelCase = self.bert(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE , inputs_embeds=_SCREAMING_SNAKE_CASE , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
UpperCamelCase = outputs[1]
UpperCamelCase = self.dropout(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.classifier(_SCREAMING_SNAKE_CASE )
UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
UpperCamelCase = e.message
UpperCamelCase = e.exit_layer
UpperCamelCase = outputs[0]
if not self.training:
UpperCamelCase = entropy(_SCREAMING_SNAKE_CASE )
UpperCamelCase = []
UpperCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
UpperCamelCase = MSELoss()
UpperCamelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
UpperCamelCase = []
for highway_exit in outputs[-1]:
UpperCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(_SCREAMING_SNAKE_CASE )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
UpperCamelCase = MSELoss()
UpperCamelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_SCREAMING_SNAKE_CASE )
if train_highway:
UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
UpperCamelCase = (loss,) + outputs
if not self.training:
UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
UpperCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| style_context_codestyle: 280 | label: 0 |
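The `entropy` helper near the top of the DeeBERT sample above computes the Shannon entropy of `softmax(x)` directly from the logits via the identity H = log(Σᵢ e^{x_i}) − (Σᵢ x_i·e^{x_i}) / (Σᵢ e^{x_i}). A small self-contained check of that identity (the logit values are arbitrary):

```python
import torch

x = torch.tensor([[1.0, 2.0, 3.0]])  # arbitrary logits

# Direct definition: H = -sum(p_i * log(p_i)) with p = softmax(x).
p = torch.softmax(x, dim=1)
h_direct = -(p * p.log()).sum(dim=1)

# Closed form used by the entropy() helper: log(A) - B / A.
exp_x = torch.exp(x)
A = exp_x.sum(dim=1)        # sum of exp(x_i)
B = (x * exp_x).sum(dim=1)  # sum of x_i * exp(x_i)
h_formula = torch.log(A) - B / A

assert torch.allclose(h_direct, h_formula)
```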
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
"""simple docstring"""
parser = argparse.ArgumentParser()
parser.add_argument('''--model_ckpt''' , type=str , default='''microsoft/unixcoder-base-nine''' )
parser.add_argument('''--num_epochs''' , type=int , default=5 )
parser.add_argument('''--batch_size''' , type=int , default=6 )
parser.add_argument('''--gradient_accumulation_steps''' , type=int , default=1 )
parser.add_argument('''--freeze''' , type=bool , default=True )
parser.add_argument('''--learning_rate''' , type=float , default=5e-4 )
parser.add_argument('''--seed''' , type=int , default=0 )
parser.add_argument('''--lr_scheduler_type''' , type=str , default='''cosine''' )
parser.add_argument('''--num_warmup_steps''' , type=int , default=10 )
parser.add_argument('''--weight_decay''' , type=float , default=0.0_1 )
parser.add_argument('''--output_dir''' , type=str , default='''./results''' )
return parser.parse_args()
_snake_case = load("accuracy")
def lowerCamelCase_ ( A : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = eval_pred
lowerCAmelCase_ = np.argmax(A , axis=1 )
return metric.compute(predictions=A , references=A )
class CustomCallback(TrainerCallback ):
'''simple docstring'''
def __init__( self , trainer):
super().__init__()
self._trainer = trainer
def on_epoch_end( self , args , state , control , **kwargs):
if control.should_evaluate:
control_copy = deepcopy(control)
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''')
return control_copy
def main():
"""simple docstring"""
args = get_args()
set_seed(args.seed )
dataset = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
train_test = dataset.train_test_split(test_size=0.2 )
test_validation = train_test['''test'''].train_test_split(test_size=0.5 )
train_test_validation = DatasetDict(
{
'''train''': train_test['''train'''],
'''test''': test_validation['''train'''],
'''valid''': test_validation['''test'''],
} )
print('''Loading tokenizer and model''' )
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
model.config.pad_token_id = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
param.requires_grad = False
labels = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
def tokenize(example ):
inputs = tokenizer(example['''src'''] , truncation=True , max_length=10_24 )
label = labels.str2int(example['''complexity'''] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
tokenized_datasets = train_test_validation.map(
tokenize , batched=True , remove_columns=train_test_validation['''train'''].column_names , )
data_collator = DataCollatorWithPadding(tokenizer=tokenizer )
training_args = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.0_1 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
trainer = Trainer(
model=model , args=training_args , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
print('''Training...''' )
trainer.add_callback(CustomCallback(trainer ) )
trainer.train()
if __name__ == "__main__":
main()
| code_codestyle: 413 |
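The `compute_metrics` function in the training script above follows the standard `Trainer` contract: `eval_pred` is a `(logits, labels)` pair, predictions are the argmax over logits, and accuracy is computed against the labels. A standalone numeric check of that logic (all values are illustrative):

```python
import numpy as np

logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # illustrative logits
labels = np.array([1, 0, 0])

predictions = np.argmax(logits, axis=1)  # -> [1, 0, 1]
accuracy = float((predictions == labels).mean())  # 2 of 3 correct

assert abs(accuracy - 2 / 3) < 1e-9
```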
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| style_context_codestyle: 413 | label: 1 |
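The guarded try/except blocks above are transformers' standard optional-dependency pattern: a name is registered in `_import_structure` only when its backend is installed, and `_LazyModule` defers the real import to first attribute access. From user code the machinery is invisible; a hedged usage sketch (the checkpoint name is the standard public LXMERT one, and the import succeeds only with the matching backends installed):

```python
# These imports resolve lazily through _LazyModule; the torch-only names
# exist only when torch is installed.
from transformers import LxmertConfig, LxmertTokenizer

config = LxmertConfig()
tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
```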
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case_ ( lowerCAmelCase , unittest.TestCase ):
__lowerCamelCase : Any = TransfoXLTokenizer
__lowerCamelCase : int = False
__lowerCamelCase : str = False
def __A ( self ):
super().setUp()
vocab_tokens = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def get_tokenizer( self , **kwargs ):
kwargs['lower_case'] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_input_output_texts( self , tokenizer ):
input_text = '<unk> UNwanted , running'
output_text = '<unk> unwanted, running'
return input_text, output_text
def __A ( self ):
tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
tokens = tokenizer.tokenize('<unk> UNwanted , running' )
self.assertListEqual(tokens , ['<unk>', 'unwanted', ',', 'running'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : int = TransfoXLTokenizer(lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TransfoXLTokenizer(lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = TransfoXLTokenizer(lower_case=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
SCREAMING_SNAKE_CASE_ : Dict = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
self.assertListEqual(tokenizer.tokenize(__lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(tokenizer.convert_tokens_to_string(__lowerCAmelCase ) , __lowerCAmelCase )
def __A ( self ):
tokenizer = self.get_tokenizer()
original_len = len(tokenizer )
tokenizer.add_tokens(['new1', 'new2'] )
tokenizer.move_added_token('new1' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(tokenizer ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , 'new1' )
| code_codestyle: 345 |
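The last test above exercises `move_added_token`, which relocates a freshly added token to a chosen id rather than duplicating it. A hedged usage sketch against the standard pretrained vocabulary (the checkpoint name is assumed, not taken from the test file):

```python
from transformers import TransfoXLTokenizer

tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl-wt103")
original_len = len(tokenizer)

tokenizer.add_tokens(["new1"])         # appended at the end of the vocab
tokenizer.move_added_token("new1", 1)  # relocated to id 1, no duplicate

assert len(tokenizer) == original_len + 1
assert tokenizer.encode("new1") == [1]
assert tokenizer.decode([1]) == "new1"
```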
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Dict:
return x + 2
class snake_case_ ( unittest.TestCase ):
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = 'x = 3'
SCREAMING_SNAKE_CASE_ : Dict = {}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = evaluate(__lowerCAmelCase , {} , state=__lowerCAmelCase )
assert result == 3
self.assertDictEqual(__lowerCAmelCase , {'x': 3} )
SCREAMING_SNAKE_CASE_ : str = 'x = y'
SCREAMING_SNAKE_CASE_ : Tuple = {'y': 5}
SCREAMING_SNAKE_CASE_ : str = evaluate(__lowerCAmelCase , {} , state=__lowerCAmelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__lowerCAmelCase , {'x': 5, 'y': 5} )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'y = add_two(x)'
SCREAMING_SNAKE_CASE_ : Optional[int] = {'x': 3}
SCREAMING_SNAKE_CASE_ : Any = evaluate(__lowerCAmelCase , {'add_two': add_two} , state=__lowerCAmelCase )
assert result == 5
self.assertDictEqual(__lowerCAmelCase , {'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
SCREAMING_SNAKE_CASE_ : Tuple = evaluate(__lowerCAmelCase , {} , state=__lowerCAmelCase )
assert result is None
assert "tried to execute add_two" in out.out
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'x = 3'
SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = evaluate(__lowerCAmelCase , {} , state=__lowerCAmelCase )
assert result == 3
self.assertDictEqual(__lowerCAmelCase , {'x': 3} )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
SCREAMING_SNAKE_CASE_ : str = {'x': 3}
SCREAMING_SNAKE_CASE_ : Dict = evaluate(__lowerCAmelCase , {'add_two': add_two} , state=__lowerCAmelCase )
self.assertDictEqual(__lowerCAmelCase , {'x': 3, 'y': 5} )
self.assertDictEqual(__lowerCAmelCase , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = 'x = 3\ny = 5'
SCREAMING_SNAKE_CASE_ : Dict = {}
SCREAMING_SNAKE_CASE_ : Optional[int] = evaluate(__lowerCAmelCase , {} , state=__lowerCAmelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__lowerCAmelCase , {'x': 3, 'y': 5} )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Dict = 'text = f\'This is x: {x}.\''
SCREAMING_SNAKE_CASE_ : Any = {'x': 3}
SCREAMING_SNAKE_CASE_ : Tuple = evaluate(__lowerCAmelCase , {} , state=__lowerCAmelCase )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__lowerCAmelCase , {'x': 3, 'text': 'This is x: 3.'} )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'if x <= 3:\n y = 2\nelse:\n y = 5'
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'x': 3}
SCREAMING_SNAKE_CASE_ : List[Any] = evaluate(__lowerCAmelCase , {} , state=__lowerCAmelCase )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__lowerCAmelCase , {'x': 3, 'y': 2} )
SCREAMING_SNAKE_CASE_ : Dict = {'x': 8}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = evaluate(__lowerCAmelCase , {} , state=__lowerCAmelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__lowerCAmelCase , {'x': 8, 'y': 5} )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : str = 'test_list = [x, add_two(x)]'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'x': 3}
SCREAMING_SNAKE_CASE_ : Optional[Any] = evaluate(__lowerCAmelCase , {'add_two': add_two} , state=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , [3, 5] )
self.assertDictEqual(__lowerCAmelCase , {'x': 3, 'test_list': [3, 5]} )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Dict = 'y = x'
SCREAMING_SNAKE_CASE_ : int = {'x': 3}
SCREAMING_SNAKE_CASE_ : Any = evaluate(__lowerCAmelCase , {} , state=__lowerCAmelCase )
assert result == 3
self.assertDictEqual(__lowerCAmelCase , {'x': 3, 'y': 3} )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = 'test_list = [x, add_two(x)]\ntest_list[1]'
SCREAMING_SNAKE_CASE_ : Any = {'x': 3}
SCREAMING_SNAKE_CASE_ : Tuple = evaluate(__lowerCAmelCase , {'add_two': add_two} , state=__lowerCAmelCase )
assert result == 5
self.assertDictEqual(__lowerCAmelCase , {'x': 3, 'test_list': [3, 5]} )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
SCREAMING_SNAKE_CASE_ : Any = {'x': 3}
SCREAMING_SNAKE_CASE_ : List[Any] = evaluate(__lowerCAmelCase , {'add_two': add_two} , state=__lowerCAmelCase )
assert result == 5
self.assertDictEqual(__lowerCAmelCase , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = 'x = 0\nfor i in range(3):\n x = i'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE_ : Tuple = evaluate(__lowerCAmelCase , {'range': range} , state=__lowerCAmelCase )
assert result == 2
self.assertDictEqual(__lowerCAmelCase , {'x': 2, 'i': 2} )
| style_context_codestyle: 345 | label: 1 |
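Every test above follows one pattern: `evaluate(code, tools, state=state)` runs the snippet with only the whitelisted tools callable, mutates `state` in place, and returns the value of the last expression or assignment. A condensed sketch (assuming a transformers version that still ships `transformers.tools`):

```python
from transformers.tools.python_interpreter import evaluate

state = {"x": 3}
result = evaluate("y = add_two(x)", {"add_two": lambda v: v + 2}, state=state)

assert result == 5                # value of the last assignment
assert state == {"x": 3, "y": 5}  # state mutated in place
```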
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file ) -> str:
components = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
f'''{test_file} instead.''' )
test_fn = components[-1]
if not test_fn.endswith('''py''' ):
raise ValueError(f'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith('''test_modeling_''' ):
raise ValueError(
f'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
components = components[:-1] + [test_fn.replace('''.py''' , '''''' )]
test_module_path = '''.'''.join(components )
return test_module_path
def get_test_module(test_file ):
test_module_path = get_module_path(test_file )
test_module = importlib.import_module(test_module_path )
return test_module
def get_tester_classes(test_file ):
tester_classes = []
test_module = get_test_module(test_file )
for attr in dir(test_module ):
if attr.endswith('''ModelTester''' ):
tester_classes.append(getattr(test_module , attr ) )
# sort with class names
return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_classes(test_file ):
test_classes = []
test_module = get_test_module(test_file )
for attr in dir(test_module ):
attr_value = getattr(test_module , attr )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
model_classes = getattr(attr_value , '''all_model_classes''' , [] )
if len(model_classes ) > 0:
test_classes.append(attr_value )
# sort with class names
return sorted(test_classes , key=lambda x : x.__name__ )
def get_model_classes(test_file ):
test_classes = get_test_classes(test_file )
model_classes = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(model_classes , key=lambda x : x.__name__ )
def get_model_tester_from_test_class(test_class ):
test = test_class()
if hasattr(test , '''setUp''' ):
test.setUp()
model_tester = None
if hasattr(test , '''model_tester''' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
model_tester = test.model_tester.__class__
return model_tester
def get_test_classes_for_model(test_file , model_class ):
test_classes = get_test_classes(test_file )
target_test_classes = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(test_class )
# sort with class names
return sorted(target_test_classes , key=lambda x : x.__name__ )
def get_tester_classes_for_model(test_file , model_class ):
test_classes = get_test_classes_for_model(test_file , model_class )
tester_classes = []
for test_class in test_classes:
tester_class = get_model_tester_from_test_class(test_class )
if tester_class is not None:
tester_classes.append(tester_class )
# sort with class names
return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_to_tester_mapping(test_file ):
test_classes = get_test_classes(test_file )
test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
return test_tester_mapping
def get_model_to_test_mapping(test_file ):
model_classes = get_model_classes(test_file )
model_test_mapping = {
model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
}
return model_test_mapping
def get_model_to_tester_mapping(test_file ):
model_classes = get_model_classes(test_file )
model_to_tester_mapping = {
model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
}
return model_to_tester_mapping
def to_json(o ):
if isinstance(o , str ):
return o
elif isinstance(o , type ):
return o.__name__
elif isinstance(o , (list, tuple) ):
return [to_json(x ) for x in o]
elif isinstance(o , dict ):
return {to_json(k ): to_json(v ) for k, v in o.items()}
else:
return o
| code_codestyle: 710 |
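The first helper above turns a test-file path into an importable module path by swapping path separators for dots and dropping the `.py` suffix. A self-contained re-derivation of that transformation (assuming a POSIX path separator; the file path is illustrative):

```python
import os

test_file = "tests/models/bert/test_modeling_bert.py"  # illustrative path
components = test_file.split(os.path.sep)

# Same transformation get_module_path performs above.
components = components[:-1] + [components[-1].replace(".py", "")]
module_path = ".".join(components)

assert module_path == "tests.models.bert.test_modeling_bert"
```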
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self , unet , scheduler ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
scheduler = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
def __call__( self , batch_size : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta : float = 0.0 , num_inference_steps : int = 50 , use_clipped_model_output : Optional[bool] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
'''simple docstring'''
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , int ):
image_shape = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(generator , list ) and len(generator ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(num_inference_steps )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
model_output = self.unet(image , t ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
image = self.scheduler.step(
model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample
image = (image / 2 + 0.5).clamp(0 , 1 )
image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image )
| style_context_codestyle: 88 | label: 0 |
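The `__call__` above is the stock DDIM sampling loop: draw Gaussian noise, then alternate UNet noise prediction with scheduler steps at the chosen `eta`. A hedged usage sketch, assuming the class is diffusers' `DDIMPipeline`; the checkpoint name is a common example, not taken from this file:

```python
import torch
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
generator = torch.Generator().manual_seed(0)

# eta=0.0 is deterministic DDIM; eta=1.0 injects DDPM-like variance.
image = pipe(batch_size=1, num_inference_steps=50, eta=0.0, generator=generator).images[0]
```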
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__ ( _A):
"""simple docstring"""
def __init__( self : Union[str, Any] , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : str ) -> None:
warnings.warn(
'''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use VideoMAEImageProcessor instead.''' , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
| code_codestyle: 2 |
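The class above is a pure deprecation shim: it emits a warning and forwards everything to `VideoMAEImageProcessor`. New code should use the processor directly; the checkpoint name below is the commonly used public one and is an assumption here:

```python
from transformers import VideoMAEImageProcessor

processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base")
# inputs = processor(list_of_video_frames, return_tensors="pt")
```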
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
"""simple docstring"""
def __init__( self ) -> None:
self.process = psutil.Process()
self.peak_monitoring = False
def peak_monitor( self ) -> None:
self.cpu_memory_peak = -1
while True:
self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def start( self ) -> None:
self.peak_monitoring = True
self.thread = threading.Thread(target=self.peak_monitor )
self.thread.daemon = True
self.thread.start()
def stop( self ) -> int:
self.peak_monitoring = False
self.thread.join()
return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
"""simple docstring"""
# Time
measures = {"""time""": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
measures["""cpu"""] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
measures[str(i )] = torch.cuda.memory_allocated(i )
torch.cuda.reset_peak_memory_stats()
return measures
def end_measure(start_measures ):
"""simple docstring"""
# Time
measures = {"""time""": time.time() - start_measures["""time"""]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
measures["""cpu"""] = (psutil.Process().memory_info().rss - start_measures["""cpu"""]) / 2**20
measures["""cpu-peak"""] = (cpu_peak_tracker.stop() - start_measures["""cpu"""]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
measures[str(i )] = (torch.cuda.memory_allocated(i ) - start_measures[str(i )]) / 2**20
measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i ) - start_measures[str(i )]) / 2**20
return measures
def log_measures(measures , description ):
"""simple docstring"""
print(F"{description}:" )
print(F"- Time: {measures['time']:.2f}s" )
for i in range(torch.cuda.device_count() ):
print(F"- GPU {i} allocated: {measures[str(i )]:.2f}MiB" )
peak = measures[F"{i}-peak"]
print(F"- GPU {i} peak: {peak:.2f}MiB" )
print(F"- CPU RAM allocated: {measures['cpu']:.2f}MiB" )
print(F"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB" )
| style_context_codestyle: 591 | label: 0 |
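Put together, the helpers above bracket a workload: snapshot time, CPU RSS, and per-GPU allocations, run the code, then report deltas and peaks in MiB. A usage sketch assuming the helper names `start_measure`, `end_measure`, and `log_measures` (they match accelerate's benchmarking utilities, but treat them as an assumption); it must run alongside those definitions:

```python
import torch

start = start_measure()

# Hypothetical workload to profile.
_ = torch.randn(1024, 1024) @ torch.randn(1024, 1024)

measures = end_measure(start)
log_measures(measures, "matmul benchmark")
```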
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
UpperCamelCase_ : str = MgpstrTokenizer
UpperCamelCase_ : Union[str, Any] = False
UpperCamelCase_ : str = {}
UpperCamelCase_ : int = False
def A_ ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# fmt: off
_UpperCamelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
_UpperCamelCase = dict(zip(a , range(len(a ) ) ) )
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a ) + """\n""" )
def A_ ( self , **a ) -> Optional[int]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **a )
def A_ ( self , a ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = """tester"""
_UpperCamelCase = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def A_ ( self ) -> Dict:
'''simple docstring'''
pass
def A_ ( self ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.get_tokenizers(do_lower_case=a )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_UpperCamelCase = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
_UpperCamelCase = tokenizer.encode([special_token] , add_special_tokens=a )
self.assertEqual(len(a ) , 1 )
_UpperCamelCase = tokenizer.decode(a , skip_special_tokens=a )
self.assertTrue(special_token not in decoded )
def A_ ( self ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_UpperCamelCase , _UpperCamelCase = self.get_input_output_texts(a )
_UpperCamelCase = tokenizer.tokenize(a )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(a )
_UpperCamelCase = tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(a )
self.assertNotEqual(len(a ) , 0 )
_UpperCamelCase = tokenizer.decode(a )
self.assertIsInstance(a , a )
self.assertEqual(text_a.replace(""" """ , """""" ) , a )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def A_ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
| code_codestyle: 202 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class lowerCAmelCase__ ( __lowercase ):
UpperCamelCase_ : int = "deta"
UpperCamelCase_ : Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , a=None , a=9_00 , a=20_48 , a=6 , a=20_48 , a=8 , a=6 , a=10_24 , a=8 , a=0.0 , a=True , a="relu" , a=2_56 , a=0.1 , a=0.0 , a=0.0 , a=0.02 , a=1.0 , a=True , a=False , a="sine" , a=5 , a=4 , a=4 , a=True , a=3_00 , a=True , a=True , a=1 , a=5 , a=2 , a=1 , a=1 , a=5 , a=2 , a=0.1 , a=0.25 , **a , ) -> Dict:
'''simple docstring'''
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_UpperCamelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(a , a ):
_UpperCamelCase = backbone_config.pop("""model_type""" )
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(a )
_UpperCamelCase = backbone_config
_UpperCamelCase = num_queries
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
# deformable attributes
_UpperCamelCase = num_feature_levels
_UpperCamelCase = encoder_n_points
_UpperCamelCase = decoder_n_points
_UpperCamelCase = two_stage
_UpperCamelCase = two_stage_num_proposals
_UpperCamelCase = with_box_refine
_UpperCamelCase = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
_UpperCamelCase = focal_alpha
super().__init__(is_encoder_decoder=a , **a )
@property
def A_ ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def A_ ( self ) -> int:
'''simple docstring'''
return self.d_model
def A_ ( self ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = copy.deepcopy(self.__dict__ )
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
| style_context_codestyle: 202 | label: 1 |
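Two behaviors of the constructor above are worth showing: with no `backbone_config` it falls back to a default ResNet backbone, and `two_stage=True` is only legal together with `with_box_refine=True`. A hedged construction sketch (assuming the class is transformers' `DetaConfig`):

```python
from transformers import DetaConfig

config = DetaConfig(two_stage=True, with_box_refine=True)  # valid pairing
assert config.backbone_config.model_type == "resnet"       # default backbone

# DetaConfig(two_stage=True, with_box_refine=False) would raise the
# ValueError defined in the constructor above.
```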
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class a_ :
@staticmethod
def __UpperCamelCase ( *snake_case_ , **snake_case_ ):
pass
def hashimage(image: Image ) -> str:
m = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image ) -> Dict:
npimg = np.array(mask )
shape = npimg.shape
return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class a_ (unittest.TestCase ):
__lowerCAmelCase : int = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
__lowerCAmelCase : Any = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = MaskGenerationPipeline(model=snake_case_ , image_processor=snake_case_ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def __UpperCamelCase ( self ):
pass
@slow
@require_torch
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
_lowerCAmelCase : Optional[Any] = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=2_5_6 )
# Shortening by hashing
new_output = []
for i, o in enumerate(outputs["""masks"""] ):
new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(new_output , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.8871}
] , )
# fmt: on
@require_torch
@slow
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = """facebook/sam-vit-huge"""
_lowerCAmelCase : str = pipeline("""mask-generation""" , model=snake_case_ )
_lowerCAmelCase : List[Any] = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=2_5_6 )
# Shortening by hashing
new_output = []
for i, o in enumerate(outputs["""masks"""] ):
new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(new_output , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0053},
] , )
| code_codestyle: 384 |
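Condensing the slow tests above into a minimal usage sketch of the mask-generation pipeline (the model and image URL are the ones the tests themselves use):

```python
from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
outputs = generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    points_per_batch=256,
)
# outputs["masks"] and outputs["scores"] are parallel lists, as the
# assertions above assume.
```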
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def _UpperCAmelCase ( _lowerCamelCase : int ) -> int:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("""Parameter number must be int""" )
if number < 0:
raise ValueError("""Parameter number must be greater than or equal to 0""" )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(_lowerCamelCase ) )
def _UpperCAmelCase ( _lowerCamelCase : int = 60 , _lowerCamelCase : int = 1_00_00_00 ) -> int:
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("""Parameters chain_length and number_limit must be int""" )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"""Parameters chain_length and number_limit must be greater than 0""" )
# the counter for the chains with the exact desired length
_lowerCAmelCase : Union[str, Any] = 0
# the cached sizes of the previous chains
_lowerCAmelCase : dict[int, int] = {}
for start_chain_element in range(1 , _lowerCamelCase ):
# The temporary set will contain the elements of the chain
_lowerCAmelCase : Any = set()
_lowerCAmelCase : Dict = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
_lowerCAmelCase : Union[str, Any] = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(_lowerCamelCase )
chain_set_length += 1
_lowerCAmelCase : List[Any] = digit_factorial_sum(_lowerCamelCase )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
_lowerCAmelCase : Union[str, Any] = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution()}')
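A quick, hand-checkable sketch of the two functions above (illustrative only, not part of the original module; the small arguments are assumptions chosen to keep the run cheap):
# digit_factorial_sum can be verified by hand: 145 is a "factorion".
assert digit_factorial_sum(145) == 1 + 24 + 120  # 1! + 4! + 5! = 145
assert digit_factorial_sum(69) == 720 + 362_880  # 6! + 9!
# Counting chains of length 5 below 1000 is cheap enough for a smoke test:
print(solution(chain_length=5, number_limit=1_000))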
| 384 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
single_char_strings, two_char_strings = analyze_text(text)
my_alphas = list(''' ''' + ascii_lowercase)
# the total count of single characters, used to normalise counts into probabilities.
all_sum = sum(single_char_strings.values() )
# one length string
my_fir_sum = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
my_str = single_char_strings[ch]
prob = my_str / all_sum
my_fir_sum += prob * math.log2(prob)  # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
all_sum = sum(two_char_strings.values() )
my_sec_sum = 0
# for each alpha pair (two in size) calculate entropy.
for cha in my_alphas:
for chb in my_alphas:
sequence = cha + chb
if sequence in two_char_strings:
my_str = two_char_strings[sequence]
prob = int(my_str) / all_sum
my_sec_sum += prob * math.log2(prob)
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def analyze_text(text: str) -> tuple[dict, dict]:
single_char_strings = Counter()  # type: ignore
two_char_strings = Counter()  # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(text) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
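As a hand-checkable illustration of the first-order term above, H = -Σ p(c)·log2 p(c); for four distinct, equally frequent characters each p = 1/4, so H is exactly 2 bits. This is an added sketch (the helper name is made up) and is not part of the original module:
# Sketch: first-order Shannon entropy of a toy string, matching the
# single-character loop above. For "abcd", each p = 1/4, so H = 2.0 bits.
from collections import Counter
import math

def first_order_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((n / total) * math.log2(n / total) for n in counts.values())

assert abs(first_order_entropy("abcd") - 2.0) < 1e-9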
| 706 |
"""simple docstring"""
def binary_recursive(decimal: int) -> str:
decimal = int(decimal)
if decimal in (0, 1):  # Exit cases for the recursion
return str(decimal)
div, mod = divmod(decimal , 2 )
return binary_recursive(div) + str(mod)
def main(number: str) -> str:
number = str(number).strip()
if not number:
raise ValueError('''No input value was provided''' )
negative = '''-''' if number.startswith('''-''' ) else ''''''
number = number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return f'''{negative}0b{binary_recursive(int(number) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
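A few hand-checked calls for the converter above (an illustrative sketch, cross-checked against the built-in bin()):
assert main("7") == "0b111"        # 7 -> 111 in base 2
assert main("-11") == "-0b1011"    # sign handled by the lstrip("-") branch
assert main("0") == "0b0"          # recursion exit case
assert bin(7) == "0b111"           # standard-library cross-check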
| 430 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class ASTConfig(PretrainedConfig):
model_type = '''audio-spectrogram-transformer'''
def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=1024 , num_mel_bins=128 , **kwargs , ):
'''simple docstring'''
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.patch_size = patch_size
self.qkv_bias = qkv_bias
self.frequency_stride = frequency_stride
self.time_stride = time_stride
self.max_length = max_length
self.num_mel_bins = num_mel_bins
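A minimal instantiation sketch for the config class above (illustrative only; the asserted values are just the defaults from the signature):
config = ASTConfig(num_mel_bins=128, time_stride=10)
assert config.hidden_size == 768   # default from the signature
assert config.num_mel_bins == 128  # explicitly passed value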
| 595 |
"""simple docstring"""
def remove_digit(num: int) -> int:
'''simple docstring'''
if not isinstance(num, int):
raise TypeError("only integers accepted as input" )
else:
num_str = str(abs(num) )
num_transpositions = [list(num_str) for char in range(len(num_str) )]
for index in range(len(num_str) ):
num_transpositions[index].pop(index)
return max(
int("".join(list(transposition) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
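Two hand-checked examples for remove_digit above (an added sketch, not in the original file):
# Removing one digit of 152 yields 52, 12 or 15; the maximum is 52.
assert remove_digit(152) == 52
# abs() drops the sign first, so -290 reduces to max(90, 20, 29) == 90.
assert remove_digit(-290) == 90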
| 595 | 1 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class DebugLauncherTester(unittest.TestCase):
"""simple docstring"""
def test_script(self):
debug_launcher(test_script.main)
def test_ops(self):
debug_launcher(test_ops.main)
| 648 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 648 | 1 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
"""simple docstring"""
model_type: str = field(
default=None , metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES )} )
data_dir: str = field(
default=None , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
max_seq_length: int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
doc_stride: int = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
max_query_length: int = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
max_answer_length: int = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
overwrite_cache: bool = field(
default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
version_2_with_negative: bool = field(
default=False , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
null_score_diff_threshold: float = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
n_best_size: int = field(
default=20 , metadata={'help': 'The total number of n-best predictions to generate.'} )
lang_id: int = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
threads: int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class Split(Enum):
"""simple docstring"""
train = 'train'
dev = 'dev'
class SquadDataset(Dataset):
"""simple docstring"""
args: SquadDataTrainingArguments
features: List[SquadFeatures]
mode: Split
is_language_sensitive: bool
def __init__( self , args: SquadDataTrainingArguments , tokenizer: PreTrainedTokenizer , limit_length: Optional[int] = None , mode: Union[str, Split] = Split.train , is_language_sensitive: Optional[bool] = False , cache_dir: Optional[str] = None , dataset_format: Optional[str] = "pt" , ):
self.args = args
self.is_language_sensitive = is_language_sensitive
self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(mode , str ):
try:
mode = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
self.mode = mode
# Load data features from cache or dataset file
version_tag = "v2" if args.version_2_with_negative else "v1"
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path ):
if os.path.exists(cached_features_file ) and not args.overwrite_cache:
start = time.time()
self.old_features = torch.load(cached_features_file )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
self.features = self.old_features["features"]
self.dataset = self.old_features.get("dataset" , None )
self.examples = self.old_features.get("examples" , None )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
self.examples = self.processor.get_dev_examples(args.data_dir )
else:
self.examples = self.processor.get_train_examples(args.data_dir )
self.features, self.dataset = squad_convert_examples_to_features(
examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
start = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , cached_features_file , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
return len(self.features )
def __getitem__( self , i ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
feature = self.features[i]
input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
inputs = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
if self.mode == Split.train:
start_positions = torch.tensor(feature.start_position , dtype=torch.long )
end_positions = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 692 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
model_class = FlaxAutoencoderKL
@property
def dummy_input( self ):
batch_size = 4
num_channels = 3
sizes = (32, 32)
prng_key = jax.random.PRNGKey(0 )
image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def prepare_init_args_and_inputs_for_common( self ):
init_dict = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
| 692 | 1 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float] , x: float ):
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner(poly: Sequence[float] , x: float ):
'''simple docstring'''
result = 0.0
for coeff in reversed(poly ):
result = result * x + coeff
return result
if __name__ == "__main__":
poly = (0.0, 0.0, 5.0, 9.3, 7.0)
x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
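A quick equivalence check between the two evaluators above (an added sketch; the arithmetic is verified by hand):
# For p(x) = 1 + 2x + 3x^2 at x = 2: 1 + 4 + 12 = 17. Horner folds the same
# coefficients right-to-left: ((0*2 + 3)*2 + 2)*2 + 1 = 17.
assert evaluate_poly((1.0, 2.0, 3.0), 2.0) == horner((1.0, 2.0, 3.0), 2.0) == 17.0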
| 708 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
"""simple docstring"""
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self ):
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def create_and_check_distilbert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = DistilBertModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , input_mask )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_distilbert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = DistilBertForMaskedLM(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_distilbert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = DistilBertForQuestionAnswering(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def create_and_check_distilbert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_labels = self.num_labels
model = DistilBertForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_distilbert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_labels = self.num_labels
model = DistilBertForTokenClassification(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_distilbert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_choices = self.num_choices
model = DistilBertForMultipleChoice(config=config )
model.to(torch_device )
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
result = model(
multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = True
test_pruning = True
test_resize_embeddings = True
test_resize_position_embeddings = True
def setUp( self ):
self.model_tester = DistilBertModelTester(self )
self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
def test_config( self ):
self.config_tester.run_common_tests()
def test_distilbert_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
def test_for_masked_lm( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
def test_for_question_answering( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
def test_for_sequence_classification( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
def test_for_token_classification( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
def test_for_multiple_choice( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
@slow
def test_model_from_pretrained( self ):
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = DistilBertModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@slow
@require_torch_gpu
def test_torchscript_device_change( self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
config.torchscript = True
model = model_class(config=config )
inputs_dict = self._prepare_for_class(inputs_dict , model_class )
traced_model = torch.jit.trace(
model , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(traced_model , os.path.join(tmp , """traced_model.pt""" ) )
loaded = torch.jit.load(os.path.join(tmp , """traced_model.pt""" ) , map_location=torch_device )
loaded(inputs_dict["""input_ids"""].to(torch_device ) , inputs_dict["""attention_mask"""].to(torch_device ) )
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def test_inference_no_head_absolute_embedding( self ):
model = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
output = model(input_ids , attention_mask=attention_mask )[0]
expected_shape = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , expected_shape )
expected_slice = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 398 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
def __init__( self , value ) -> None:
"""simple docstring"""
self.value = value
self.left: Node | None = None
self.right: Node | None = None
class BinaryTreeNodeSum:
def __init__( self , tree: Node ) -> None:
"""simple docstring"""
self.tree = tree
def depth_first_search( self , node: Node | None ) -> int:
"""simple docstring"""
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self ) -> Iterator[int]:
"""simple docstring"""
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod() | 498 |
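A small sketch exercising the classes above: the DFS node sum over a three-node tree is 1 + 2 + 3 = 6 (added for illustration, not in the original file):
root = Node(1)
root.left = Node(2)
root.right = Node(3)
assert next(iter(BinaryTreeNodeSum(root))) == 6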
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path , enable_fusion=False ):
model, model_cfg = create_model(
'''HTSAT-tiny''' , '''roberta''' , checkpoint_path , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=enable_fusion , fusion_type='''aff_2d''' if enable_fusion else None , )
return model, model_cfg
def rename_state_dict(state_dict ):
model_state_dict = {}
sequential_layers_pattern = r'''.*sequential.(\d+).*'''
text_projection_pattern = r'''.*_projection.(\d+).*'''
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
key = key.replace(key_to_modify , new_key )
if re.match(sequential_layers_pattern , key ):
# replace sequential layers with list
sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
key = key.replace(f'''sequential.{sequential_layer}.''' , f'''layers.{int(sequential_layer )//3}.linear.''' )
elif re.match(text_projection_pattern , key ):
projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
transformers_projection_layer = 1 if projecton_layer == 0 else 2
key = key.replace(f'''_projection.{projecton_layer}.''' , f'''_projection.linear{transformers_projection_layer}.''' )
if "audio" in key and "qkv" in key:
# split qkv into query, key and value
mixed_qkv = value
qkv_dim = mixed_qkv.size(0 ) // 3
query_layer = mixed_qkv[:qkv_dim]
key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
value_layer = mixed_qkv[qkv_dim * 2 :]
model_state_dict[key.replace("qkv" , "query" )] = query_layer
model_state_dict[key.replace("qkv" , "key" )] = key_layer
model_state_dict[key.replace("qkv" , "value" )] = value_layer
else:
model_state_dict[key] = value
return model_state_dict
def convert_clap_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
clap_model, clap_model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
clap_model.eval()
state_dict = clap_model.state_dict()
state_dict = rename_state_dict(state_dict )
transformers_config = ClapConfig()
transformers_config.audio_config.enable_fusion = enable_fusion
model = ClapModel(transformers_config )
# ignore the spectrogram embedding layer
model.load_state_dict(state_dict , strict=False )
model.save_pretrained(pytorch_dump_folder_path )
transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
lowerCamelCase_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion) | 498 | 1 |
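To see what rename_state_dict above does to a single key, a toy sketch (the key name is made up for illustration):
# "text_branch" is remapped via KEYS_TO_MODIFY_MAPPING, and "sequential.3."
# collapses to "layers.1.linear." because 3 // 3 == 1.
toy_state_dict = {"text_branch.sequential.3.weight": torch.zeros(2)}
renamed = rename_state_dict(toy_state_dict)
assert "text_model.layers.1.linear.weight" in renamed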
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a ( a_ ):
def UpperCamelCase_ ( self ):
lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCamelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_lowerCamelCase , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(_lowerCamelCase , 'num_attention_heads' ) )
class a :
def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=3_2 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=6_4_0 , _lowerCamelCase=4 , _lowerCamelCase="silu" , _lowerCamelCase=3 , _lowerCamelCase=3_2 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=1_0 , _lowerCamelCase=None , ):
lowercase = parent
lowercase = batch_size
lowercase = image_size
lowercase = patch_size
lowercase = num_channels
lowercase = last_hidden_size
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = conv_kernel_size
lowercase = output_stride
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = classifier_dropout_prob
lowercase = use_labels
lowercase = is_training
lowercase = num_labels
lowercase = initializer_range
lowercase = scope
def UpperCamelCase_ ( self ):
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowercase = MobileViTModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowercase = model(_lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowercase = self.num_labels
lowercase = MobileViTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowercase = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowercase = self.num_labels
lowercase = MobileViTForSemanticSegmentation(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowercase = model(_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowercase = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase_ ( self ):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels, pixel_labels = config_and_inputs
inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a ( a_, a_, unittest.TestCase ):
UpperCAmelCase_ : List[Any] =(
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase_ : List[str] =(
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ : str =False
UpperCAmelCase_ : int =False
UpperCAmelCase_ : Any =False
UpperCAmelCase_ : Optional[int] =False
def UpperCamelCase_ ( self ):
lowercase = MobileViTModelTester(self )
lowercase = MobileViTConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def UpperCamelCase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCamelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCamelCase_ ( self ):
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowercase = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
lowercase = outputs.hidden_states
lowercase = 5
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowercase = 2
for i in range(len(_lowerCamelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def UpperCamelCase_ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def UpperCamelCase_ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )
@slow
def UpperCamelCase_ ( self ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = MobileViTModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self ):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
lowercase = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_lowerCamelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCamelCase )
# verify the logits
lowercase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
lowercase = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
def UpperCamelCase_ ( self ):
lowercase = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
lowercase = model.to(_lowerCamelCase )
lowercase = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCamelCase )
lowercase = outputs.logits
# verify the logits
lowercase = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , _lowerCamelCase )
lowercase = torch.tensor(
[
[[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]],
[[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]],
[[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]],
] , device=_lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
def UpperCamelCase_ ( self ):
lowercase = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
lowercase = model.to(_lowerCamelCase )
lowercase = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCamelCase )
lowercase = outputs.logits.detach().cpu()
lowercase = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase , target_sizes=[(5_0, 6_0)] )
lowercase = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , _lowerCamelCase )
lowercase = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase )
lowercase = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , _lowerCamelCase )
| 706 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str ):
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
module_name = model_type_to_module_name(module_name )
module = importlib.import_module(f'.{module_name}' , 'transformers.models' )
try:
return getattr(module , class_name )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(extractor , '__name__' , None ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
main_module = importlib.import_module('transformers' )
if hasattr(main_module , class_name ):
return getattr(main_module , class_name )
return None
def get_feature_extractor_config(pretrained_model_name_or_path: Union[str, os.PathLike] , cache_dir: Optional[Union[str, os.PathLike]] = None , force_download: bool = False , resume_download: bool = False , proxies: Optional[Dict[str, str]] = None , use_auth_token: Optional[Union[bool, str]] = None , revision: Optional[str] = None , local_files_only: bool = False , **kwargs , ):
'''simple docstring'''
resolved_config_file = get_file_from_repo(
pretrained_model_name_or_path , FEATURE_EXTRACTOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
if resolved_config_file is None:
logger.info(
'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
return {}
with open(resolved_config_file , encoding='utf-8' ) as reader:
return json.load(reader )
class AutoFeatureExtractor:
def __init__( self ):
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES )
def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
config = kwargs.pop('config' , None )
trust_remote_code = kwargs.pop('trust_remote_code' , None )
kwargs['_from_auto'] = True
config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path , **kwargs )
feature_extractor_class = config_dict.get('feature_extractor_type' , None )
feature_extractor_auto_map = None
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(config , PretrainedConfig ):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
# It could be in `config.feature_extractor_type``
feature_extractor_class = getattr(config , 'feature_extractor_type' , None )
if hasattr(config , 'auto_map' ) and "AutoFeatureExtractor" in config.auto_map:
feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']
if feature_extractor_class is not None:
feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class )
has_remote_code = feature_extractor_auto_map is not None
has_local_code = feature_extractor_class is not None or type(config ) in FEATURE_EXTRACTOR_MAPPING
trust_remote_code = resolve_trust_remote_code(
trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
if has_remote_code and trust_remote_code:
feature_extractor_class = get_class_from_dynamic_module(
feature_extractor_auto_map , pretrained_model_name_or_path , **kwargs )
kwargs.pop('code_revision' , None )
if os.path.isdir(pretrained_model_name_or_path ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(config_dict , **kwargs )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(config_dict , **kwargs )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(config ) in FEATURE_EXTRACTOR_MAPPING:
feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config )]
return feature_extractor_class.from_dict(config_dict , **kwargs )
raise ValueError(
F'Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '
F'`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '
F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}' )
@staticmethod
def register(config_class , feature_extractor_class ):
FEATURE_EXTRACTOR_MAPPING.register(config_class , feature_extractor_class )
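Typical usage of the class above goes through from_pretrained, which resolves the concrete extractor class from the checkpoint's config. A hedged usage sketch (the checkpoint name is illustrative and the call requires Hub access):
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(feature_extractor).__name__)  # expected: "Wav2Vec2FeatureExtractor", per the mapping above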
| 134 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a__ :
def __init__(self : Tuple, __UpperCAmelCase : Tuple, __UpperCAmelCase : List[str]=13, __UpperCAmelCase : List[str]=32, __UpperCAmelCase : Any=3, __UpperCAmelCase : Dict=4, __UpperCAmelCase : str=[10, 20, 30, 40], __UpperCAmelCase : List[str]=[2, 2, 3, 2], __UpperCAmelCase : Dict=True, __UpperCAmelCase : int=True, __UpperCAmelCase : List[str]=37, __UpperCAmelCase : Optional[Any]="gelu", __UpperCAmelCase : Tuple=10, __UpperCAmelCase : Tuple=0.02, __UpperCAmelCase : Dict=["stage2", "stage3", "stage4"], __UpperCAmelCase : Tuple=[2, 3, 4], __UpperCAmelCase : Dict=None, ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : List[str] = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : Dict = num_stages
SCREAMING_SNAKE_CASE : Tuple = hidden_sizes
SCREAMING_SNAKE_CASE : Optional[Any] = depths
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : List[Any] = use_labels
SCREAMING_SNAKE_CASE : List[str] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : Tuple = out_features
SCREAMING_SNAKE_CASE : List[Any] = out_indices
SCREAMING_SNAKE_CASE : Any = scope
def lowercase__ (self : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size], self.num_labels )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowercase__ (self : str ) -> Dict:
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def lowercase__ (self : Optional[Any], __UpperCAmelCase : Tuple, __UpperCAmelCase : int, __UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = ConvNextModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : str = model(lowerCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def lowercase__ (self : Optional[int], __UpperCAmelCase : Dict, __UpperCAmelCase : Optional[int], __UpperCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = ConvNextForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ (self : int, __UpperCAmelCase : Optional[Any], __UpperCAmelCase : Any, __UpperCAmelCase : List[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = ConvNextBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = ConvNextBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def lowercase__ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
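# For intuition on the shape assertions in the tests above: ConvNext's patchify
# stem downsamples by 4 and each later stage halves the resolution, so stage i
# of a 4-stage model sees image_size // (4 * 2**i). A small illustrative helper
# (hypothetical, not part of the test suite):
def convnext_stage_sizes(image_size: int, num_stages: int = 4) -> list:
    sizes = [image_size // 4]  # stem: /4
    for _ in range(num_stages - 1):
        sizes.append(sizes[-1] // 2)  # each later stage: /2
    return sizes


assert convnext_stage_sizes(224) == [56, 28, 14, 7]  # 224 // 32 == 7, as asserted above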
| 507 | import contextlib
import os
import sqlite3

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
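# As a quick orientation to the reader/writer API exercised above, a minimal
# round trip looks roughly like this (sketch; the file names are illustrative):
#
#     ds = SqlDatasetReader("dataset", "sqlite:///data.sqlite", cache_dir="/tmp/cache").read()
#     SqlDatasetWriter(ds, "dataset", "sqlite:///copy.sqlite", num_proc=1).write()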
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write() | 613 | 0 |
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int):
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list):
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
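# For contrast with the recursive traversal above, an equivalent iterative DFS
# using an explicit stack (a sketch over the same adjacency-dict representation,
# not part of the Graph class):
def dfs_iterative(graph: dict, start: int) -> list:
    visited, stack, order = set(), [start], []
    while stack:
        vertex = stack.pop()
        if vertex not in visited:
            visited.add(vertex)
            order.append(vertex)
            # push neighbours in reverse so they pop in insertion order
            stack.extend(reversed(graph.get(vertex, [])))
    return order


# e.g. dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0) == [0, 1, 2, 3]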
| 388 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the hypothesis value and the actual output for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Compute h(theta, x) = theta_0 + theta_1*x_1 + theta_2*x_2 + theta_3*x_3."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
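# The loop above is batch gradient descent on h(theta, x) = theta_0 + theta_1*x_1 + ... .
# A vectorized sketch of the same parameter update (illustrative, not a drop-in replacement):
def gradient_descent_step(theta, x, y, lr=LEARNING_RATE):
    # x is an (m, n) numpy array with a leading column of ones for the bias term theta[0]
    predictions = x @ theta  # hypothesis values, shape (m,)
    gradient = x.T @ (predictions - y) / len(y)  # dJ/dtheta for the squared-error cost
    return theta - lr * gradient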
| 388 | 1 |
"""Base85 (Ascii85) encoding and decoding helpers."""
import base64


def base85_encode(string: str) -> bytes:
    # encode the utf-8 bytes of ``string`` with Ascii85
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    # decode Ascii85 bytes back into a utf-8 string
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
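# Round trip: Ascii85 of "Hello World!" is the classic example value.
assert base85_encode("Hello World!") == b'87cURD]i,"Ebo80'
assert base85_decode(b'87cURD]i,"Ebo80') == "Hello World!"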
| 525 |
"""Project Euler 92: square digit chains."""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS: list = [None] * 10_000_000
CHAINS[0] = True  # the chain of 1 ends at 1
CHAINS[57] = False  # 58 is the first declared member of the chain ending at 89


def chain(number: int) -> bool:
    """Return True if ``number``'s chain ends at 1, False if it ends at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below ``number`` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
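# A direct (much slower) cross-check of the chain logic for small inputs,
# useful for validating the memoized version above (illustrative only):
def ends_in_89(number: int) -> bool:
    # Follow the digit-square chain until it settles at 1 or 89.
    while number not in (1, 89):
        number = sum(int(d) ** 2 for d in str(number))
    return number == 89


assert [n for n in range(1, 10) if ends_in_89(n)] == [2, 3, 4, 5, 6, 8, 9]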
| 525 | 1 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )
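# Worked instance: 4150 equals the sum of the fifth powers of its digits,
# 4**5 + 1**5 + 5**5 + 0**5 == 1024 + 1 + 3125 + 0 == 4150,
# so ``solution()`` includes it in the total.
assert digits_fifth_powers_sum(4150) == 4150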
if __name__ == "__main__":
print(solution()) | 714 |
def reverse_long_words(sentence: str) -> str:
    """Reverse all words that are longer than 4 characters in a sentence."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw')) | 678 | 0 |
"""
Slowsort is a sorting algorithm of humorously bad performance, based on the
principle of *multiply and surrender*.
"""
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start..end]`` (inclusive) in place."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
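# usage: slowsort sorts the list in place
example = [5, 2, 9, 1]
slowsort(example)
assert example == [1, 2, 5, 9]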
| 251 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks if a matrix is Hermitian, i.e. equal to its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix ``a`` and vector ``v``."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
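# For reference, the quantity computed above is the Rayleigh quotient
#     R(A, v) = (v* A v) / (v* v)
# where v* is the conjugate transpose of v. For a Hermitian A it is always real
# and is bounded by the extreme eigenvalues: lambda_min <= R(A, v) <= lambda_max.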
| 257 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : BatchEncoding , lowercase_ : DPRReaderOutput , lowercase_ : int = 16 , lowercase_ : int = 64 , lowercase_ : int = 4 , ):
lowercase_ : str = reader_input["""input_ids"""]
lowercase_ , lowercase_ , lowercase_ : Tuple = reader_output[:3]
lowercase_ : Optional[Any] = len(lowercase_ )
lowercase_ : Optional[Any] = sorted(range(lowercase_ ) , reverse=lowercase_ , key=relevance_logits.__getitem__ )
lowercase_ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
lowercase_ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase_ : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase_ : Any = sequence_ids.index(self.pad_token_id )
else:
lowercase_ : Optional[int] = len(lowercase_ )
lowercase_ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowercase_ , top_spans=lowercase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowercase_ , start_index=lowercase_ , end_index=lowercase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowercase_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[int] , lowercase_ : List[int] , lowercase_ : int , lowercase_ : int , ):
lowercase_ : int = []
for start_index, start_score in enumerate(lowercase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowercase_ : Dict = sorted(lowercase_ , key=lambda lowercase_ : x[1] , reverse=lowercase_ )
lowercase_ : Any = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' )
lowercase_ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowercase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
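# The span-scoring loop in ``_get_best_spans`` ranks every candidate (start, end)
# window by start_logit + end_logit and then greedily keeps non-overlapping
# intervals. A toy illustration of the ranking step (hypothetical logits, not
# tied to any checkpoint):
#
#     start_logits = [0.1, 2.0, 0.3]
#     end_logits = [0.2, 1.5, 2.5]
#     scores = [
#         ((s, s + length), start_logits[s] + end)
#         for s in range(len(start_logits))
#         for length, end in enumerate(end_logits[s : s + 3])
#     ]
#     max(scores, key=lambda pair: pair[1])  # ((1, 2), 4.5): start token 1, end token 2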
| 30 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs)
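# A minimal sanity check of the conv-layer validation above: the kernel-size
# list must have exactly ``num_conv_layers`` entries (illustrative usage):
#
#     config = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))
#     assert len(config.conv_kernel_sizes) == config.num_conv_layers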
| 30 | 1 |
"""simple docstring"""
from timeit import timeit
UpperCamelCase__ = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    """Two-pointer check, walking inward from both ends."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'{key:21} {value}')
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
| 110 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # pad tokens in the labels are replaced by -100 so the loss ignores them
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer,
        )

        # start training
        trainer.train()
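        # Why the -100 substitution in ``_map_to_encoder_decoder_inputs`` matters:
        # PyTorch's cross-entropy loss skips positions labeled -100, so padded
        # target tokens do not contribute to the loss. A minimal illustration:
        #
        #     import torch.nn.functional as F
        #     logits = torch.randn(1, 3, 10)           # (batch, seq, vocab)
        #     labels = torch.tensor([[4, -100, 7]])    # middle position ignored
        #     F.cross_entropy(logits.view(-1, 10), labels.view(-1), ignore_index=-100)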
| 246 | 0 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader, description="Evaluation", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader, description="Prediction", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
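# A sketch of how this trainer is typically wired up in a QA example script
# (every name below is illustrative, not fixed by this module):
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,                     # raw examples, pre-tokenization
#         post_process_function=post_processing_function,  # maps logits to answer strings
#         compute_metrics=compute_metrics,                 # e.g. SQuAD exact-match / F1
#     )
#     metrics = trainer.evaluate()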
| 707 |
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct by bumping duplicates upward."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                # path compression
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Compute a minimum spanning tree with Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
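# Quick end-to-end usage sketch of the class above (illustrative only):
#
#     g = Graph.build(edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3)])
#     mst = Graph.boruvka_mst(g)
#     print(mst)  # prints each MST edge (stored in both directions)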
| 313 | 0 |
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check the Project Euler 43 substring-divisibility property of a digit tuple."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers that have the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 37 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true",
    )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")

    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retried while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
UpperCamelCase : List[Any] = get_args()
main(args)
| 37 | 1 |
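The evaluation driver above reduces to a read–batch–flush loop over the questions file. A minimal, framework-free sketch of that loop, assuming the caller supplies `answer_batch` (a hypothetical stand-in for the model's generate-and-decode call):

def run_eval(eval_path, preds_path, batch_size, answer_batch):
    # Stream questions from disk and flush predictions one batch at a time,
    # so partial results survive a crash mid-run.
    with open(eval_path) as eval_file, open(preds_path, "w") as preds_file:
        batch = []
        for line in eval_file:
            batch.append(line.strip())
            if len(batch) == batch_size:
                preds_file.write("\n".join(answer_batch(batch)) + "\n")
                preds_file.flush()
                batch = []
        if batch:  # trailing partial batch
            preds_file.write("\n".join(answer_batch(batch)))
            preds_file.flush()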
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 717 |
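This is the transformers-style lazy-import pattern: declare an import map up front, keep real imports under TYPE_CHECKING, and otherwise swap the module for a lazy proxy. A minimal sketch of the same idea using PEP 562 module-level `__getattr__`; it is meant to live in a package `__init__.py`, and the submodule name is a hypothetical example:

import importlib

_IMPORT_MAP = {"SqueezeBertConfig": ".configuration_squeezebert"}  # attribute -> submodule

def __getattr__(name):
    # Resolve the attribute lazily on first access instead of paying the
    # import cost (e.g. torch) at package import time.
    if name in _IMPORT_MAP:
        module = importlib.import_module(_IMPORT_MAP[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")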
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = list(snake_case__)
lowerCAmelCase_ : Tuple = list(snake_case__)
lowerCAmelCase_ : List[str] = 0
for i in range(len(snake_case__)):
if lista[i] != lista[i]:
count += 1
lowerCAmelCase_ : Dict = "_"
if count > 1:
return False
else:
return "".join(snake_case__)
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Union[str, Any] = []
while True:
lowerCAmelCase_ : Tuple = ["$"] * len(snake_case__)
lowerCAmelCase_ : Tuple = []
for i in range(len(snake_case__)):
for j in range(i + 1 , len(snake_case__)):
lowerCAmelCase_ : Optional[int] = compare_string(binary[i] , binary[j])
if k is False:
lowerCAmelCase_ : str = "*"
lowerCAmelCase_ : Tuple = "*"
temp.append("X")
for i in range(len(snake_case__)):
if checka[i] == "$":
pi.append(binary[i])
if len(snake_case__) == 0:
return pi
lowerCAmelCase_ : List[Any] = list(set(snake_case__))
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = []
for minterm in minterms:
lowerCAmelCase_ : Dict = ""
for _ in range(snake_case__):
lowerCAmelCase_ : Dict = str(minterm % 2) + string
minterm //= 2
temp.append(snake_case__)
return temp
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = list(snake_case__)
lowerCAmelCase_ : Dict = list(snake_case__)
lowerCAmelCase_ : Dict = 0
for i in range(len(snake_case__)):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Dict = [0] * len(snake_case__)
for i in range(len(chart[0])):
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : int = -1
for j in range(len(snake_case__)):
if chart[j][i] == 1:
count += 1
lowerCAmelCase_ : Optional[int] = j
if count == 1:
lowerCAmelCase_ : Union[str, Any] = 1
for i in range(len(snake_case__)):
if select[i] == 1:
for j in range(len(chart[0])):
if chart[i][j] == 1:
for k in range(len(snake_case__)):
lowerCAmelCase_ : Tuple = 0
temp.append(prime_implicants[i])
while True:
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Dict = -1
lowerCAmelCase_ : Tuple = 0
for i in range(len(snake_case__)):
lowerCAmelCase_ : Dict = chart[i].count(1)
if count_n > max_n:
lowerCAmelCase_ : Optional[int] = count_n
lowerCAmelCase_ : Optional[Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem])
for i in range(len(chart[0])):
if chart[rem][i] == 1:
for j in range(len(snake_case__)):
lowerCAmelCase_ : Any = 0
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : str = [[0 for x in range(len(snake_case__))] for x in range(len(snake_case__))]
for i in range(len(snake_case__)):
lowerCAmelCase_ : Optional[Any] = prime_implicants[i].count("_")
for j in range(len(snake_case__)):
if is_for_table(prime_implicants[i] , binary[j] , snake_case__):
lowerCAmelCase_ : Dict = 1
return chart
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = int(input("Enter the no. of variables\n"))
lowerCAmelCase_ : Tuple = [
float(snake_case__)
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
]
lowerCAmelCase_ : Any = decimal_to_binary(snake_case__ , snake_case__)
lowerCAmelCase_ : Dict = check(snake_case__)
print("Prime Implicants are:")
print(snake_case__)
lowerCAmelCase_ : int = prime_implicant_chart(snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = selection(snake_case__ , snake_case__)
print("Essential Prime Implicants are:")
print(snake_case__)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 683 | 0 |
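The core move in the Quine–McCluskey pass above is merging two implicants that differ in exactly one bit and marking that bit with `_`. A small self-contained sketch of that merge step:

def merge_if_adjacent(term_a, term_b):
    """Merge two implicants differing in exactly one position, else return None."""
    diff = [i for i, (x, y) in enumerate(zip(term_a, term_b)) if x != y]
    if len(diff) != 1:
        return None
    i = diff[0]
    return term_a[:i] + "_" + term_a[i + 1:]

assert merge_if_adjacent("0110", "0100") == "01_0"
assert merge_if_adjacent("0110", "1001") is None  # differ in more than one bit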
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
snake_case_ ="""dandelin/vilt-b32-finetuned-vqa"""
snake_case_ =(
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
snake_case_ ="""image_qa"""
snake_case_ =AutoProcessor
snake_case_ =AutoModelForVisualQuestionAnswering
snake_case_ =["""image""", """text"""]
snake_case_ =["""text"""]
def __init__(self ,*__lowerCamelCase ,**__lowerCamelCase ) -> Dict:
"""simple docstring"""
requires_backends(self ,['''vision'''] )
super().__init__(*__lowerCamelCase ,**__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> List[str]:
"""simple docstring"""
return self.pre_processor(__lowerCamelCase ,__lowerCamelCase ,return_tensors='''pt''' )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> Any:
"""simple docstring"""
with torch.no_grad():
return self.model(**__lowerCamelCase ).logits
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Any = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 647 |
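The tool above follows a fixed encode → forward → decode protocol: pre-process inputs, run the model, post-process the logits. A toy sketch of the same three-stage split with no model behind it (the upper-casing "forward" is just a placeholder):

class EchoTool:
    """Toy tool following the same encode/forward/decode split as PipelineTool."""

    def encode(self, text):
        return {"tokens": text.split()}            # pre-processing

    def forward(self, inputs):
        return [t.upper() for t in inputs["tokens"]]  # stand-in for the model call

    def decode(self, outputs):
        return " ".join(outputs)                   # post-processing

    def __call__(self, text):
        return self.decode(self.forward(self.encode(text)))

assert EchoTool()("hello world") == "HELLO WORLD"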
import os
import jsonlines
import numpy as np
from tqdm import tqdm
__snake_case : List[str] =2_0_4_8
__snake_case : List[Any] =4_0_9_6
__snake_case : Tuple =4_2
__snake_case : List[Any] =os.environ.pop('PROCESS_TRAIN', 'false')
__snake_case : Tuple ={'null': 0, 'short': 1, 'long': 2, 'yes': 3, 'no': 4}
def lowerCAmelCase__ ( lowerCamelCase_ : Optional[int]):
'''simple docstring'''
def choose_first(lowerCamelCase_ : Optional[Any] ,lowerCamelCase_ : Tuple=False):
assert isinstance(lowerCamelCase_ ,lowerCamelCase_)
if len(lowerCamelCase_) == 1:
lowerCAmelCase__ : Dict = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
lowerCAmelCase__ : Dict = {k: [a[k]] for k in a}
if len(a['''start_token''']) > 0:
break
return a
lowerCAmelCase__ : Dict = {'''id''': example['''id''']}
lowerCAmelCase__ : str = example['''annotations''']
lowerCAmelCase__ : List[str] = annotation['''yes_no_answer''']
if 0 in yes_no_answer or 1 in yes_no_answer:
lowerCAmelCase__ : Union[str, Any] = ['''yes'''] if 1 in yes_no_answer else ['''no''']
lowerCAmelCase__ : List[Any] = []
lowerCAmelCase__ : Tuple = []
lowerCAmelCase__ : List[Any] = ['''<cls>''']
else:
lowerCAmelCase__ : Any = ['''short''']
lowerCAmelCase__ : int = choose_first(annotation['''short_answers'''])
if len(out['''start_token''']) == 0:
# answer will be long if short is not available
lowerCAmelCase__ : Optional[Any] = ['''long''']
lowerCAmelCase__ : str = choose_first(annotation['''long_answer'''] ,is_long_answer=lowerCamelCase_)
lowerCAmelCase__ : Optional[Any] = []
answer.update(lowerCamelCase_)
# disregard some samples
if len(answer['''start_token''']) > 1 or answer["start_token"] == answer["end_token"]:
lowerCAmelCase__ : Optional[Any] = True
else:
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : Tuple = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text''']
if not all(isinstance(answer[k] ,lowerCamelCase_) for k in cols):
raise ValueError('''Issue in ID''' ,example['''id'''])
return answer
def lowerCAmelCase__ ( lowerCamelCase_ : Optional[int] ,lowerCamelCase_ : Union[str, Any]=False):
'''simple docstring'''
lowerCAmelCase__ : Dict = _get_single_answer(lowerCamelCase_)
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowerCAmelCase__ : Union[str, Any] = example['''document''']['''tokens''']
lowerCAmelCase__ : Dict = []
for i in range(len(doc['''token'''])):
if not doc["is_html"][i]:
context.append(doc['''token'''][i])
return {
"context": " ".join(lowerCamelCase_),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# used later to drop all no-answer samples
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
lowerCAmelCase__ : Any = ['''start_token''', '''end_token''']
answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols}) # e.g. [10] == 10
lowerCAmelCase__ : Dict = example['''document''']['''tokens''']
lowerCAmelCase__ : List[Any] = answer['''start_token''']
lowerCAmelCase__ : Any = answer['''end_token''']
lowerCAmelCase__ : Dict = []
for i in range(len(doc['''token'''])):
if not doc["is_html"][i]:
context.append(doc['''token'''][i])
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
lowerCAmelCase__ : str = ''' '''.join(context[start_token:end_token])
# sanity-check the span reconstruction above
if assertion:
lowerCAmelCase__ : Any = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']]
lowerCAmelCase__ : List[str] = doc['''token'''][answer['''start_token'''] : answer['''end_token''']]
lowerCAmelCase__ : List[Any] = ''' '''.join([old[i] for i in range(len(lowerCamelCase_)) if not is_html[i]])
if new != old:
print('''ID:''' ,example['''id'''])
print('''New:''' ,lowerCamelCase_ ,end='''\n''')
print('''Old:''' ,lowerCamelCase_ ,end='''\n\n''')
return {
"context": " ".join(lowerCamelCase_),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def lowerCAmelCase__ ( lowerCamelCase_ : List[str] ,lowerCamelCase_ : List[Any] ,lowerCamelCase_ : Union[str, Any]=2048 ,lowerCamelCase_ : Optional[Any]=4096 ,lowerCamelCase_ : Dict=True):
'''simple docstring'''
lowerCAmelCase__ : Tuple = get_context_and_ans(lowerCamelCase_ ,assertion=lowerCamelCase_)
lowerCAmelCase__ : str = out['''answer''']
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
lowerCAmelCase__ : Tuple = tokenizer(example['''question''']['''text'''] ,out['''context''']).input_ids
lowerCAmelCase__ : int = input_ids.index(tokenizer.sep_token_id) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : int = []
lowerCAmelCase__ : Optional[int] = input_ids[:q_len]
lowerCAmelCase__ : Tuple = range(lowerCamelCase_ ,len(lowerCamelCase_) ,max_length - doc_stride)
for i in doc_start_indices:
lowerCAmelCase__ : Union[str, Any] = i + max_length - q_len
lowerCAmelCase__ : Union[str, Any] = input_ids[i:end_index]
inputs.append(q_indices + slice)
category.append(answer['''category'''][0])
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(lowerCamelCase_),
"end_token": [-100] * len(lowerCamelCase_),
"category": category,
},
}
lowerCAmelCase__ : List[Any] = out['''context'''].split()
lowerCAmelCase__ : Optional[Any] = splitted_context[answer['''end_token''']]
lowerCAmelCase__ : Any = len(
tokenizer(
''' '''.join(splitted_context[: answer['''start_token''']]) ,add_special_tokens=lowerCamelCase_ ,).input_ids)
lowerCAmelCase__ : List[Any] = len(
tokenizer(''' '''.join(splitted_context[: answer['''end_token''']]) ,add_special_tokens=lowerCamelCase_).input_ids)
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
lowerCAmelCase__ : Union[str, Any] = len(tokenizer(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_).input_ids)
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
lowerCAmelCase__ : Any = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1] # right & left are inclusive
lowerCAmelCase__ : List[str] = answer['''start_token''']
lowerCAmelCase__ : Union[str, Any] = answer['''end_token''']
if assertion:
lowerCAmelCase__ : List[str] = tokenizer.decode(lowerCamelCase_)
if answer["span"] != new:
print('''ISSUE IN TOKENIZATION''')
print('''OLD:''' ,answer['''span'''])
print('''NEW:''' ,lowerCamelCase_ ,end='''\n\n''')
if len(lowerCamelCase_) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
lowerCAmelCase__ : Tuple = input_ids[:q_len]
lowerCAmelCase__ : Optional[int] = range(lowerCamelCase_ ,len(lowerCamelCase_) ,max_length - doc_stride)
lowerCAmelCase__ : int = []
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : Optional[int] = []
lowerCAmelCase__ : Tuple = [] # null, yes, no, long, short
for i in doc_start_indices:
lowerCAmelCase__ : Tuple = i + max_length - q_len
lowerCAmelCase__ : Tuple = input_ids[i:end_index]
inputs.append(q_indices + slice)
assert len(inputs[-1]) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
lowerCAmelCase__ : Any = start_token - i + q_len
lowerCAmelCase__ : Optional[Any] = end_token - i + q_len
answers_category.append(answer['''category'''][0]) # ["short"] -> "short"
else:
lowerCAmelCase__ : str = -100
lowerCAmelCase__ : Any = -100
answers_category.append('''null''')
lowerCAmelCase__ : List[Any] = inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowerCamelCase_)
answers_end_token.append(lowerCamelCase_)
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('''ISSUE in strided for ID:''' ,example['''id'''])
print('''New:''' ,tokenizer.decode(lowerCamelCase_))
print('''Old:''' ,tokenizer.decode(lowerCamelCase_) ,end='''\n\n''')
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def lowerCAmelCase__ ( lowerCamelCase_ : Optional[int] ,lowerCamelCase_ : Dict ,lowerCamelCase_ : str=2048 ,lowerCamelCase_ : List[str]=4096 ,lowerCamelCase_ : List[str]=False):
'''simple docstring'''
lowerCAmelCase__ : List[str] = get_strided_contexts_and_ans(
lowerCamelCase_ ,lowerCamelCase_ ,doc_stride=lowerCamelCase_ ,max_length=lowerCamelCase_ ,assertion=lowerCamelCase_ ,)
return example
def lowerCAmelCase__ ( lowerCamelCase_ : List[str] ,lowerCamelCase_ : str):
'''simple docstring'''
with jsonlines.open(lowerCamelCase_ ,'''a''') as writer:
for example in tqdm(lowerCamelCase_ ,total=len(lowerCamelCase_) ,desc='''Saving samples ... '''):
lowerCAmelCase__ : Union[str, Any] = example['''labels''']
for ids, start, end, cat in zip(
example['''input_ids'''] ,labels['''start_token'''] ,labels['''end_token'''] ,labels['''category'''] ,):
if start == -1 and end == -1:
continue # skip wasted samples that have no answer
if cat == "null" and np.random.rand() < 0.6:
continue # randomly drop ~60% of the null samples
writer.write(
{
'''input_ids''': ids,
'''start_token''': start,
'''end_token''': end,
'''category''': CATEGORY_MAPPING[cat],
})
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
__snake_case : str =load_dataset('natural_questions')
__snake_case : int =BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
__snake_case : str =data['train' if PROCESS_TRAIN == 'true' else 'validation']
__snake_case : Dict ={
'tokenizer': tokenizer,
'doc_stride': DOC_STRIDE,
'max_length': MAX_LENGTH,
'assertion': False,
}
__snake_case : int =data.map(prepare_inputs, fn_kwargs=fn_kwargs)
__snake_case : List[Any] =data.remove_columns(['annotations', 'document', 'id', 'question'])
print(data)
np.random.seed(SEED)
__snake_case : int ='nq-training.jsonl' if PROCESS_TRAIN == 'true' else 'nq-validation.jsonl'
save_to_disk(data, file_name=cache_file_name)
| 647 | 1 |
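The doc-stride logic above repeats the question prefix in front of overlapping context slices so that long documents fit the model's window. A minimal sketch of that windowing with toy token ids; parameter values are illustrative only:

def stride_windows(input_ids, q_len, max_length, doc_stride):
    # Each window keeps the question prefix plus a (max_length - q_len)-token
    # slice of context; consecutive slices advance by max_length - doc_stride.
    q_ids = input_ids[:q_len]
    windows = []
    for i in range(q_len, len(input_ids), max_length - doc_stride):
        windows.append(q_ids + input_ids[i : i + max_length - q_len])
    return windows

# 2 question tokens, 8 context tokens, windows of 6 with stride 4
print(stride_windows(list(range(10)), q_len=2, max_length=6, doc_stride=4))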
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Optional[Any] = KandinskyVaaControlnetPipeline
_UpperCamelCase : List[Any] = ["image_embeds", "negative_image_embeds", "hint"]
_UpperCamelCase : int = ["image_embeds", "negative_image_embeds", "hint"]
_UpperCamelCase : int = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCamelCase : Any = False
@property
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
return 32
@property
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
return self.time_input_dim
@property
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
return 1_00
@property
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Optional[int] = {
'in_channels': 8,
# Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_snake_case : str = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Any = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : Dict = self.dummy_unet
_snake_case : int = self.dummy_movq
_snake_case : Any = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , steps_offset=1 , prediction_type='epsilon' , thresholding=lowerCamelCase_ , )
_snake_case : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Dict=0 ):
'''simple docstring'''
_snake_case : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
_snake_case : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create hint
_snake_case : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
_snake_case : int = torch.manual_seed(lowerCamelCase_ )
else:
_snake_case : int = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
_snake_case : str = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = 'cpu'
_snake_case : Optional[int] = self.get_dummy_components()
_snake_case : Union[str, Any] = self.pipeline_class(**lowerCamelCase_ )
_snake_case : Optional[Any] = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Tuple = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
_snake_case : List[Any] = output.images
_snake_case : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ) , return_dict=lowerCamelCase_ , )[0]
_snake_case : Optional[Any] = image[0, -3:, -3:, -1]
_snake_case : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : Tuple = np.array(
[0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' )
_snake_case : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
_snake_case : List[Any] = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
_snake_case : Optional[int] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
_snake_case : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
_snake_case : str = KandinskyVaaControlnetPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
_snake_case : Any = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : str = 'A robot, 4k photo'
_snake_case : Optional[Any] = torch.Generator(device='cuda' ).manual_seed(0 )
_snake_case , _snake_case : List[str] = pipe_prior(
lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_snake_case : Optional[Any] = torch.Generator(device='cuda' ).manual_seed(0 )
_snake_case : Optional[Any] = pipeline(
image_embeds=lowerCamelCase_ , negative_image_embeds=lowerCamelCase_ , hint=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=1_00 , output_type='np' , )
_snake_case : List[Any] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
| 652 |
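The assertions above compare a 3×3 slice of the generated image against hard-coded expectations within an absolute tolerance. A generic sketch of that check:

import numpy as np

def assert_close(actual, expected, atol=1e-2):
    """Slice-comparison check in the same spirit as the pipeline tests above."""
    diff = np.abs(np.asarray(actual) - np.asarray(expected)).max()
    assert diff < atol, f"max abs diff {diff} exceeds atol {atol}"

assert_close([0.695, 0.868], [0.70, 0.87], atol=1e-2)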
import itertools
import math
def A__( __lowerCAmelCase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A__( ):
_snake_case : Optional[Any] = 2
while True:
if is_prime(__lowerCAmelCase ):
yield num
num += 1
def A__( __lowerCAmelCase = 1_00_01 ):
return next(itertools.islice(prime_generator() , nth - 1 , __lowerCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 652 | 1 |
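A restatement of the 6k ± 1 trial-division idea above with descriptive names; the example value is easy to verify by hand:

import itertools
import math

def is_prime(n):
    """Trial division over 6k +/- 1 candidates up to sqrt(n)."""
    if n in (2, 3):
        return True
    if n < 2 or n % 2 == 0 or n % 3 == 0:
        return False
    for i in range(5, math.isqrt(n) + 1, 6):
        if n % i == 0 or n % (i + 2) == 0:
            return False
    return True

def nth_prime(nth):
    primes = (n for n in itertools.count(2) if is_prime(n))
    return next(itertools.islice(primes, nth - 1, None))

assert nth_prime(6) == 13  # 2, 3, 5, 7, 11, 13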
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : str = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : str = 'umt5'
_snake_case : Dict = ['past_key_values']
def __init__( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any]=250112 , lowerCAmelCase__ : Optional[int]=512 , lowerCAmelCase__ : Optional[int]=64 , lowerCAmelCase__ : Optional[int]=1024 , lowerCAmelCase__ : Optional[int]=8 , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : List[str]=6 , lowerCAmelCase__ : Tuple=32 , lowerCAmelCase__ : Any=128 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : int=1e-6 , lowerCAmelCase__ : Union[str, Any]=1.0 , lowerCAmelCase__ : Union[str, Any]="gated-gelu" , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : Any="T5Tokenizer" , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Dict=0 , lowerCAmelCase__ : str=1 , lowerCAmelCase__ : int=0 , **lowerCAmelCase__ : Dict , ) -> int:
'''simple docstring'''
super().__init__(
is_encoder_decoder=lowerCAmelCase__ , tokenizer_class=lowerCAmelCase__ , tie_word_embeddings=lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
_UpperCamelCase = vocab_size
_UpperCamelCase = d_model
_UpperCamelCase = d_kv
_UpperCamelCase = d_ff
_UpperCamelCase = num_layers
_UpperCamelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_UpperCamelCase = num_heads
_UpperCamelCase = relative_attention_num_buckets
_UpperCamelCase = relative_attention_max_distance
_UpperCamelCase = dropout_rate
_UpperCamelCase = layer_norm_epsilon
_UpperCamelCase = initializer_factor
_UpperCamelCase = feed_forward_proj
_UpperCamelCase = use_cache
_UpperCamelCase = self.feed_forward_proj.split('''-''' )
_UpperCamelCase = act_info[-1]
_UpperCamelCase = act_info[0] == '''gated'''
if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
if feed_forward_proj == "gated-gelu":
_UpperCamelCase = '''gelu_new'''
@property
def snake_case__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
return self.d_model
@property
def snake_case__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
return self.num_heads
@property
def snake_case__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
return self.num_layers
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def snake_case__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
_UpperCamelCase = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
_UpperCamelCase = '''past_encoder_sequence + sequence'''
_UpperCamelCase = {0: '''batch'''}
_UpperCamelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
_UpperCamelCase = {0: '''batch''', 1: '''decoder_sequence'''}
_UpperCamelCase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__ , direction='''inputs''' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def snake_case__ ( self : Tuple ) -> int:
'''simple docstring'''
return 13
@property
def snake_case__ ( self : Optional[Any] ) -> float:
'''simple docstring'''
return 5e-4
| 98 |
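The constructor above parses `feed_forward_proj` strings such as "gated-gelu" into an activation name plus a gating flag, with a special-case remap to "gelu_new". A standalone sketch of that parsing:

def parse_ff_proj(feed_forward_proj):
    """Split 'gated-gelu' -> ('gelu_new', True) and 'relu' -> ('relu', False)."""
    parts = feed_forward_proj.split("-")
    if len(parts) > 2 or (len(parts) == 2 and parts[0] != "gated"):
        raise ValueError(f"invalid feed_forward_proj: {feed_forward_proj!r}")
    is_gated = parts[0] == "gated"
    act = parts[-1]
    if feed_forward_proj == "gated-gelu":
        act = "gelu_new"  # the config remaps this alias
    return act, is_gated

assert parse_ff_proj("gated-gelu") == ("gelu_new", True)
assert parse_ff_proj("relu") == ("relu", False)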
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCAmelCase = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
UpperCAmelCase = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
UpperCAmelCase = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout: maximum time in seconds allowed per candidate program (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
UpperCAmelCase = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can do this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
UpperCAmelCase = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
def UpperCamelCase_ ( self) -> str:
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""")),
"""references""": datasets.Value("""string"""),
}) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=[1, 10, 100] , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=3.0) -> Union[str, Any]:
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0) != "1":
raise ValueError(_WARNING)
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""")
with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE) as executor:
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Optional[int] = Counter()
_lowerCamelCase : Any = 0
_lowerCamelCase : List[Any] = defaultdict(SCREAMING_SNAKE_CASE)
for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)):
for candidate in candidates:
_lowerCamelCase : Any = candidate + """\n""" + test_case
_lowerCamelCase : Union[str, Any] = (test_program, timeout, task_id, completion_id[task_id])
_lowerCamelCase : List[str] = executor.submit(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE)
futures.append(SCREAMING_SNAKE_CASE)
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(SCREAMING_SNAKE_CASE):
_lowerCamelCase : int = future.result()
results[result["task_id"]].append((result["""completion_id"""], result))
_lowerCamelCase , _lowerCamelCase : List[Any] = [], []
for result in results.values():
result.sort()
_lowerCamelCase : List[str] = [r[1]["""passed"""] for r in result]
total.append(len(SCREAMING_SNAKE_CASE))
correct.append(sum(SCREAMING_SNAKE_CASE))
_lowerCamelCase : List[Any] = np.array(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = np.array(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = k
_lowerCamelCase : Optional[Any] = {F'pass@{k}': estimate_pass_at_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _snake_case ( __snake_case : List[str] , __snake_case : List[str] , __snake_case : List[str] ):
"""simple docstring"""
def estimator(__snake_case : int , __snake_case : int , __snake_case : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(__snake_case , __snake_case ):
_lowerCamelCase : Optional[int] = itertools.repeat(__snake_case , len(__snake_case ) )
else:
assert len(__snake_case ) == len(__snake_case )
_lowerCamelCase : List[str] = iter(__snake_case )
return np.array([estimator(int(__snake_case ) , int(__snake_case ) , __snake_case ) for n, c in zip(__snake_case , __snake_case )] )
| 88 | 0 |
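The `estimator` above implements the unbiased HumanEval estimator pass@k = 1 - C(n-c, k) / C(n, k) for n samples with c passing, computed as a running product to avoid huge binomial coefficients. A standalone sketch that reproduces the pass@1/pass@2 values from the docstring example:

import numpy as np

def pass_at_k(n, c, k):
    """Unbiased pass@k: 1 - C(n-c, k) / C(n, k), evaluated as prod(1 - k/i)."""
    if n - c < k:
        return 1.0  # every size-k draw must contain a passing sample
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

# 2 candidates, 1 passing: pass@1 = 0.5, pass@2 = 1.0
assert pass_at_k(n=2, c=1, k=1) == 0.5
assert pass_at_k(n=2, c=1, k=2) == 1.0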
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
lowercase_ = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def lowerCAmelCase (__A , __A=None , __A=None , __A=None):
"""simple docstring"""
_a = True
while ask_again:
_a = input(__A)
try:
if default is not None and len(__A) == 0:
return default
return convert_value(__A) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__A)
def lowerCAmelCase (__A , __A=[] , __A=None , __A=0):
"""simple docstring"""
_a = BulletMenu(__A , __A)
_a = menu.run(default_choice=__A)
return convert_value(__A) if convert_value is not None else result
def lowerCAmelCase (__A):
"""simple docstring"""
_a = int(__A)
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value])
def lowerCAmelCase (__A):
"""simple docstring"""
_a = int(__A)
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value])
def lowerCAmelCase (__A):
"""simple docstring"""
_a = int(__A)
return DynamoBackend(DYNAMO_BACKENDS[value]).value
def lowerCAmelCase (__A):
"""simple docstring"""
_a = int(__A)
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value])
def lowerCAmelCase (__A):
"""simple docstring"""
_a = int(__A)
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value])
def lowerCAmelCase (__A):
"""simple docstring"""
return {"yes": True, "no": False}[value.lower()]
class __A ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def a__ (self , A , A , A , A ) -> Any:
"""simple docstring"""
_a = super()._format_usage(A , A , A , A )
_a = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
| 721 |
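The `_ask_field` helper above is a retry-until-it-parses prompt: re-ask on conversion failure, return a default on empty input. A minimal sketch of the same helper with plain names:

def ask_field(prompt, convert=None, default=None, error_message=None):
    """Re-prompt until the reply converts cleanly, mirroring the helper above."""
    while True:
        reply = input(prompt)
        try:
            if default is not None and len(reply) == 0:
                return default
            return convert(reply) if convert is not None else reply
        except Exception:
            if error_message is not None:
                print(error_message)

# e.g. ask_field("batch size: ", convert=int, default=8, error_message="enter an integer")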
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
lowercase_ = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
lowercase_ = typing.Union[np.floataa, int, float] # noqa: UP007
def lowerCAmelCase (__A , __A):
"""simple docstring"""
return np.sqrt(np.sum((np.asarray(__A) - np.asarray(__A)) ** 2))
def lowerCAmelCase (__A , __A):
"""simple docstring"""
return sum((va - va) ** 2 for va, va in zip(__A , __A)) ** (1 / 2)
if __name__ == "__main__":
def lowerCAmelCase ():
"""simple docstring"""
from timeit import timeit
print('''Without Numpy''')
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ))
print('''With Numpy''')
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ))
benchmark()
| 352 | 0 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
_UpperCAmelCase : Tuple = 3_00 # TEMPERATURE (unit = K)
def _SCREAMING_SNAKE_CASE ( __snake_case : float , __snake_case : float , __snake_case : float , ):
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 107 |
'''simple docstring'''
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( __snake_case : int | str ):
_A = str(__snake_case )
return n == n[::-1]
def _SCREAMING_SNAKE_CASE ( __snake_case : int = 1_0_0_0_0_0_0 ):
_A = 0
for i in range(1 , __snake_case ):
if is_palindrome(__snake_case ) and is_palindrome(bin(__snake_case ).split('b' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 107 | 1 |
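The solution above sums numbers that read the same forwards and backwards in both base 10 and base 2 (Project Euler 36). A compact sketch of the double-base palindrome check:

def is_double_base_palindrome(n):
    """True when n is a palindrome in both base 10 and base 2."""
    dec, binary = str(n), bin(n)[2:]
    return dec == dec[::-1] and binary == binary[::-1]

assert is_double_base_palindrome(585)       # 585 = 0b1001001001
assert not is_double_base_palindrome(10)    # "10" is not a palindrome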
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
with open(__snake_case ) as metadata_file:
SCREAMING_SNAKE_CASE_ = json.load(__snake_case )
SCREAMING_SNAKE_CASE_ = LukeConfig(use_entity_aware_attention=__snake_case , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE_ = torch.load(__snake_case , map_location='cpu' )["module"]
# Load the entity vocab file
SCREAMING_SNAKE_CASE_ = load_original_entity_vocab(__snake_case )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE_ = AddedToken('<ent>' , lstrip=__snake_case , rstrip=__snake_case )
SCREAMING_SNAKE_CASE_ = AddedToken('<ent2>' , lstrip=__snake_case , rstrip=__snake_case )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__snake_case )
with open(os.path.join(__snake_case , 'tokenizer_config.json' ) , 'r' ) as f:
SCREAMING_SNAKE_CASE_ = json.load(__snake_case )
SCREAMING_SNAKE_CASE_ = "MLukeTokenizer"
with open(os.path.join(__snake_case , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(__snake_case , __snake_case )
with open(os.path.join(__snake_case , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(__snake_case , __snake_case )
SCREAMING_SNAKE_CASE_ = MLukeTokenizer.from_pretrained(__snake_case )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(['@'] )[0]
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(['#'] )[0]
SCREAMING_SNAKE_CASE_ = state_dict["embeddings.word_embeddings.weight"]
SCREAMING_SNAKE_CASE_ = word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE_ = state_dict[bias_name]
SCREAMING_SNAKE_CASE_ = decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE_ = f"""encoder.layer.{layer_index}.attention.self."""
SCREAMING_SNAKE_CASE_ = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE_ = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE_ = state_dict["entity_embeddings.entity_embeddings.weight"]
SCREAMING_SNAKE_CASE_ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE_ = state_dict["entity_predictions.bias"]
SCREAMING_SNAKE_CASE_ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE_ = LukeForMaskedLM(config=__snake_case ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
SCREAMING_SNAKE_CASE_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
SCREAMING_SNAKE_CASE_ = state_dict[key]
else:
SCREAMING_SNAKE_CASE_ = state_dict[key]
SCREAMING_SNAKE_CASE_ = model.load_state_dict(__snake_case , strict=__snake_case )
if set(__snake_case ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(__snake_case ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE_ = MLukeTokenizer.from_pretrained(__snake_case , task='entity_classification' )
SCREAMING_SNAKE_CASE_ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
SCREAMING_SNAKE_CASE_ = (0, 9)
SCREAMING_SNAKE_CASE_ = tokenizer(__snake_case , entity_spans=[span] , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ = model(**__snake_case )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE_ = torch.Size((1, 33, 768) )
SCREAMING_SNAKE_CASE_ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __snake_case , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE_ = torch.Size((1, 1, 768) )
SCREAMING_SNAKE_CASE_ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __snake_case , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE_ = MLukeTokenizer.from_pretrained(__snake_case )
SCREAMING_SNAKE_CASE_ = "Tokyo is the capital of <mask>."
SCREAMING_SNAKE_CASE_ = (24, 30)
SCREAMING_SNAKE_CASE_ = tokenizer(__snake_case , entity_spans=[span] , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ = model(**__snake_case )
SCREAMING_SNAKE_CASE_ = encoding["input_ids"][0].tolist()
SCREAMING_SNAKE_CASE_ = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
SCREAMING_SNAKE_CASE_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__snake_case )
SCREAMING_SNAKE_CASE_ = outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(__snake_case ) )
model.save_pretrained(__snake_case )
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["[MASK]", "[PAD]", "[UNK]"]
SCREAMING_SNAKE_CASE_ = [json.loads(__snake_case ) for line in open(__snake_case )]
SCREAMING_SNAKE_CASE_ = {}
for entry in data:
SCREAMING_SNAKE_CASE_ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE_ = entity_id
break
SCREAMING_SNAKE_CASE_ = f"""{language}:{entity_name}"""
SCREAMING_SNAKE_CASE_ = entity_id
return new_mapping
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
UpperCamelCase__ : Tuple = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 713 |
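A recurring move in the conversion above is growing an embedding (or bias) matrix by one row per new special token, initialising each new row from an existing row. A small torch sketch of that pattern; shapes and indices are toy values:

import torch

def add_special_rows(weight, init_indices):
    """Append one new row per special token, each copied from an existing row,
    as the script does for <ent>/<ent2> and [MASK2]."""
    new_rows = [weight[i].unsqueeze(0) for i in init_indices]
    return torch.cat([weight, *new_rows], dim=0)

emb = torch.randn(10, 4)
extended = add_special_rows(emb, init_indices=[3, 7])
assert extended.shape == (12, 4)
assert torch.equal(extended[10], emb[3])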
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = torch.device("cpu")
def _UpperCAmelCase ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] )
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = dct.pop(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = val
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = []
for k in state_dict.keys():
SCREAMING_SNAKE_CASE_ = k
if ".pwconv" in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
SCREAMING_SNAKE_CASE_ = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
SCREAMING_SNAKE_CASE_ = k_new.split('.' )
if ls[2].isdigit():
SCREAMING_SNAKE_CASE_ = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
SCREAMING_SNAKE_CASE_ = k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = SwiftFormerConfig()
# prepare the ImageNet-1k label mapping and number of classes
SCREAMING_SNAKE_CASE_ = 1_000
SCREAMING_SNAKE_CASE_ = 'huggingface/label-files'
SCREAMING_SNAKE_CASE_ = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
SCREAMING_SNAKE_CASE_ = [3, 3, 6, 4]
SCREAMING_SNAKE_CASE_ = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
SCREAMING_SNAKE_CASE_ = [3, 3, 9, 6]
SCREAMING_SNAKE_CASE_ = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
SCREAMING_SNAKE_CASE_ = [4, 3, 10, 5]
SCREAMING_SNAKE_CASE_ = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
SCREAMING_SNAKE_CASE_ = [4, 4, 12, 6]
SCREAMING_SNAKE_CASE_ = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , check_hash=_SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE_ = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
SCREAMING_SNAKE_CASE_ = checkpoint
SCREAMING_SNAKE_CASE_ = create_rename_keys(_SCREAMING_SNAKE_CASE )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load HuggingFace model
SCREAMING_SNAKE_CASE_ = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
hf_model.load_state_dict(_SCREAMING_SNAKE_CASE )
# prepare test inputs
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained('preprocessor_config' )
SCREAMING_SNAKE_CASE_ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# compare outputs from both models
SCREAMING_SNAKE_CASE_ = get_expected_output(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1E-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
UpperCamelCase__ : Union[str, Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
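# Usage sketch for the converter above. The flag names come from the argparse
# definitions in this script; the local checkpoint path is a hypothetical example.
#
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth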
| 620 | 0 |
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base output class for a scheduler's `step` function."""

    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    """Mixin containing common functions for Flax schedulers."""

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    """Discretize the cosine schedule alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2 into betas."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod)


def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
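# Minimal sketch (an illustration, not part of the module) of how the helpers above
# compose: the cosine schedule produces betas, and the broadcast helper stretches a
# per-timestep scalar over the sample dimensions.
#
#   betas = betas_for_alpha_bar(1000)
#   alphas_cumprod = jnp.cumprod(1.0 - betas, axis=0)   # what CommonSchedulerState caches
#   t = jnp.array([0, 499, 999])
#   scale = broadcast_to_shape_from_left(alphas_cumprod[t] ** 0.5, (3, 4, 4))
#   assert scale.shape == (3, 4, 4)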
| 580 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float tensor of the given (batch, length) shape, as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue

        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
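# Quick sanity sketch for the `floats_list` helper above (standalone, no test
# harness needed): it returns `shape[0]` lists of `shape[1]` random floats.
#
#   batch = [floats_list((1, n))[0] for n in (800, 1000, 1200)]   # three mono clips
#   assert [len(x) for x in batch] == [800, 1000, 1200]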
| 580 | 1 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Visual Question Answering pipeline: answers open-ended questions about images."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the input already carries {"image": ..., "question": ...} pairs.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
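# Usage sketch (the checkpoint name is an assumption; any model registered in
# MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING works):
#
#   from transformers import pipeline
#
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="http://images.cocodataset.org/val2017/000000039769.jpg",
#       question="How many cats are there?", top_k=2)
#   # -> a list of {"score": ..., "answer": ...} dicts, best answer first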
| 708 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_resnet'] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_resnet'] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_resnet'] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
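# With the lazy module in place, importing this package stays cheap: submodules load
# only when an attribute is first touched. A sketch (assumes torch is installed, so
# the torch branch above is active):
#
#   from transformers.models.resnet import ResNetConfig, ResNetModel  # triggers lazy load
#   model = ResNetModel(ResNetConfig())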
| 571 | 0 |
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """Build and simulate a quantum full adder; returns measurement counts over (sum, carry_out)."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)

    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 392 |
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(target)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)

                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
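# Shape sketch for the layer above (hypothetical numbers, not from the original
# file): with vocab_size=10 and cutoffs=[5] there is one head cluster, and `call`
# returns log-probabilities over the full vocabulary.
#
#   layer = TFAdaptiveSoftmaxMask(vocab_size=10, d_embed=8, d_proj=8, cutoffs=[5])
#   hidden = tf.zeros((6, 2, 8))                  # [seq_len, batch, d_proj]
#   target = tf.zeros((6, 2), dtype=tf.int64)
#   out = layer(hidden, target, return_mean=True)
#   # out.shape == (6, 2, 10): head slice (5) concatenated with the tail cluster (5)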
| 392 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
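# The tester/test split above follows the usual transformers pattern: the tester
# builds a tiny random config plus inputs, and the unittest class fans them out.
# A minimal standalone sketch of the same idea (illustrative only):
#
#   tester = DistilBertModelTester(parent=None)
#   config, input_ids, input_mask, *_ = tester.prepare_config_and_inputs()
#   model = DistilBertModel(config).eval()
#   assert model(input_ids, input_mask).last_hidden_state.shape == (13, 7, 32)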
| 86 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}


def sanity_checks(args):
    """A bunch of sanity checks on the argument combination before training starts."""
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file", type=str, required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm", default=0.0, type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop", default=0.15, type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing", default=0.7, type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask", action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs", action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds", action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size", action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16", action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
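# Example launch (hypothetical paths; flag names come from the parser above, and the
# alpha settings satisfy the MLM branch of sanity_checks):
#
#   python train.py \
#       --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 \
#       --dump_path serialization_dir/my_first_distillation \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle --force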
| 86 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given any two of resistance, reactance, and impedance (pass the unknown one as 0), solve for the third."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
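# Worked example (a 3-4-5 right triangle, so the numbers are easy to verify by hand):
#
#   electrical_impedance(3, 4, 0)   # -> {'impedance': 5.0}
#   electrical_impedance(0, 4, 5)   # -> {'resistance': 3.0}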
| 218 |
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Any = argparse.ArgumentParser()
parser.add_argument("""-f""" )
_snake_case : List[str] = parser.parse_args()
return args.f
def UpperCAmelCase__ (snake_case__ : Optional[Any] ):
"""simple docstring"""
_snake_case : int = {}
_snake_case : Tuple = os.path.join(snake_case__ , """all_results.json""" )
if os.path.exists(snake_case__ ):
with open(snake_case__ , """r""" ) as f:
_snake_case : Dict = json.load(snake_case__ )
else:
raise ValueError(F"can't find {path}" )
return results
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = torch.cuda.is_available() and torch_device == """cuda"""
return is_using_cuda and is_apex_available()
A_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase( __a ):
'''simple docstring'''
@classmethod
def UpperCamelCase_ ( cls: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = tempfile.mkdtemp()
_snake_case : Optional[Any] = os.path.join(cls.tmpdir, """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
_snake_case : Dict = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def UpperCamelCase_ ( cls: Optional[Any] ):
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ, {"""WANDB_MODE""": """offline"""} )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_auto_remove_tmp_dir()
_snake_case : str = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
@mock.patch.dict(os.environ, {"""WANDB_MODE""": """offline"""} )
    def test_run_clm_no_trainer(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
@mock.patch.dict(os.environ, {"""WANDB_MODE""": """offline"""} )
    def test_run_mlm_no_trainer(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
@mock.patch.dict(os.environ, {"""WANDB_MODE""": """offline"""} )
    def test_run_ner_no_trainer(self):
        '''simple docstring'''
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ, {"""WANDB_MODE""": """offline"""} )
    def test_run_squad_no_trainer(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
@mock.patch.dict(os.environ, {"""WANDB_MODE""": """offline"""} )
    def test_run_swag_no_trainer(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ, {"""WANDB_MODE""": """offline"""} )
    def test_run_summarization_no_trainer(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ, {"""WANDB_MODE""": """offline"""} )
    def test_run_translation_no_trainer(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        '''simple docstring'''
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
@mock.patch.dict(os.environ, {"""WANDB_MODE""": """offline"""} )
    def test_run_image_classification_no_trainer(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 609 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
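# Net effect of this pattern (brief note, not part of the original file): an import such as
# `from transformers.models.mobilebert import MobileBertModel` stays cheap because the heavy
# torch/TF modules are only loaded on first attribute access, while static type checkers still
# see the real imports through the TYPE_CHECKING branch above.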
| 14 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(figure_x: float, figure_y: float, max_step: int) -> float:
    a = figure_x
    b = figure_y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + figure_x
        b = 2 * a * b + figure_y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
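# Quick sanity check (hand-picked points, not in the original file): the origin never
# escapes, so get_distance(0, 0, 50) == 1.0, while a point far outside the set such as
# get_distance(4, 0, 50) diverges on the first step and returns 0.0.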
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 14 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A__ : int = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = ["""SpeechEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = ["""FlaxSpeechEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[int] = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"
    def __init__(
        self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256,
        share_att_key=True, relative_attention=True, pos_att_type=("p2c", "c2p"), norm_rel_ebd="layer_norm",
        hidden_act="gelu_python", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1,
        feat_proj_dropout=0.0, final_dropout=0.1, initializer_range=0.02, layer_norm_eps=1E-7,
        feature_layer_norm_eps=1E-5, feat_extract_norm="group", feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
__a = hidden_size
__a = feat_extract_norm
__a = feat_extract_activation
__a = list(lowerCamelCase )
__a = list(lowerCamelCase )
__a = list(lowerCamelCase )
__a = conv_bias
__a = num_conv_pos_embeddings
__a = num_conv_pos_embedding_groups
__a = len(self.conv_dim )
__a = num_hidden_layers
__a = intermediate_size
__a = squeeze_factor
__a = max_position_embeddings
__a = position_buckets
__a = share_att_key
__a = relative_attention
__a = norm_rel_ebd
__a = list(lowerCamelCase )
__a = hidden_act
__a = num_attention_heads
__a = hidden_dropout
__a = attention_dropout
__a = activation_dropout
__a = feat_proj_dropout
__a = final_dropout
__a = layer_norm_eps
__a = feature_layer_norm_eps
__a = initializer_range
__a = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
@property
def a__ ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
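    # Sanity check (not in the original file): with the default conv_stride above this
    # property evaluates to 5 * 2**6 = 320, i.e. the overall downsampling factor of the
    # convolutional feature encoder.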
| 528 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 713 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at")
def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()
def test_median_filter():
    assert med.median_filter(gray, 3).any()
def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 395 | 0 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
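# The sample graph above is the even cycle 0-1-2-3 plus an isolated vertex, which is
# 2-colorable, so this prints True.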
| 596 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( lowerCamelCase_) -> bool:
return len(set(lowerCamelCase_)) == len(lowerCamelCase_)
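# Illustrative calls (not in the original file):
#     __UpperCAmelCase([1, 2, 3])  # -> True
#     __UpperCAmelCase([1, 2, 2])  # -> False, 2 repeats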
if __name__ == "__main__":
import doctest
doctest.testmod()
| 596 | 1 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
    plt.show()
| 706 |
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
    bounds = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
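# Illustrative calls with small hand-checked values (not part of the original file):
#     miller_rabin(97)  # -> True, 97 is prime; below 2_047 only base 2 is tested
#     miller_rabin(91)  # -> False, 91 = 7 * 13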
def test_miller_rabin() -> None:
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
    test_miller_rabin()
| 26 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)
@slow
    def test_models_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = FlaxAlbertModel.from_pretrained('albert-base-v2' )
SCREAMING_SNAKE_CASE_ = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
SCREAMING_SNAKE_CASE_ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_ = (1, 11, 768)
self.assertEqual(output.shape , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowerCAmelCase , atol=1E-4 ) ) | 31 |
import collections
import os
import re
from pathlib import Path
lowerCamelCase_ : Optional[Any] = """src/transformers"""
# Matches is_xxx_available()
lowerCamelCase_ : Union[str, Any] = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCamelCase_ : int = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase_ : Union[str, Any] = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCamelCase_ : Any = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase_ : Any = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase_ : List[Any] = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase_ : Any = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase_ : Tuple = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCamelCase_ : Tuple = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCamelCase_ : Dict = re.compile(r"""^\s*try:""")
# Catches a line with else:
lowerCamelCase_ : Union[str, Any] = re.compile(r"""^\s*else:""")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import
    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 548 | 0 |
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
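# Illustrative check (added; not part of the original script) of what rewrite_dict_keys does:
#
#     >>> rewrite_dict_keys({"le@@": 5, "tt@@": 6, "er": 7, "<unk>": 3})
#     {'le': 5, 'tt': 6, 'er</w>': 7, '<unk>': 3}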
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has since upgraded its model format, and it
    # does a whole lot of rewrites and splits on the saved weights, therefore we can't use
    # torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(fsmt_folder_path)
    model_dir = basename(fsmt_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
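# Hypothetical usage sketch (added; paths are placeholders) for converting one of the
# fairseq wmt19 checkpoints listed in best_score_hparams above:
#
#     python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#         --fsmt_checkpoint_path ~/data/wmt19.ru-en.ensemble/model4.pt \
#         --pytorch_dump_folder_path data/wmt19-ru-en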
| 348 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCamelCase (unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_SCREAMING_SNAKE_CASE : int = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def __snake_case ( self :Any , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :List[str] ) ->Optional[Any]:
lowercase : List[Any] = TextaTextGenerationPipeline(model=__magic_name__ , tokenizer=__magic_name__ )
return generator, ["Something to write", "Something else"]
def __snake_case ( self :Tuple , __magic_name__ :List[Any] , __magic_name__ :int ) ->Optional[Any]:
lowercase : Optional[Any] = generator("""Something there""" )
self.assertEqual(__magic_name__ , [{"""generated_text""": ANY(__magic_name__ )}] )
# These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
lowercase : int = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=__magic_name__ )
self.assertEqual(
__magic_name__ , [
[{"""generated_text""": ANY(__magic_name__ )}, {"""generated_text""": ANY(__magic_name__ )}],
[{"""generated_text""": ANY(__magic_name__ )}, {"""generated_text""": ANY(__magic_name__ )}],
] , )
lowercase : Dict = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=__magic_name__ )
self.assertEqual(
__magic_name__ , [
[{"""generated_text""": ANY(__magic_name__ )}, {"""generated_text""": ANY(__magic_name__ )}],
[{"""generated_text""": ANY(__magic_name__ )}, {"""generated_text""": ANY(__magic_name__ )}],
] , )
with self.assertRaises(__magic_name__ ):
generator(4 )
@require_torch
def __snake_case ( self :int ) ->Any:
lowercase : Union[str, Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
lowercase : List[Any] = generator("""Something there""" , do_sample=__magic_name__ )
self.assertEqual(__magic_name__ , [{"""generated_text""": """"""}] )
lowercase : Dict = 3
lowercase : Optional[Any] = generator(
"""Something there""" , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , )
lowercase : Tuple = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(__magic_name__ , __magic_name__ )
lowercase : Dict = generator("""This is a test""" , do_sample=__magic_name__ , num_return_sequences=2 , return_tensors=__magic_name__ )
self.assertEqual(
__magic_name__ , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
lowercase : List[Any] = generator.model.config.eos_token_id
lowercase : Dict = """<pad>"""
lowercase : Optional[Any] = generator(
["""This is a test""", """This is a second test"""] , do_sample=__magic_name__ , num_return_sequences=2 , batch_size=2 , return_tensors=__magic_name__ , )
self.assertEqual(
__magic_name__ , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def __snake_case ( self :Optional[int] ) ->List[str]:
lowercase : Dict = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
lowercase : List[Any] = generator("""Something there""" , do_sample=__magic_name__ )
self.assertEqual(__magic_name__ , [{"""generated_text""": """"""}] )
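# Minimal usage sketch (added) of the pipeline exercised above. It mirrors the tests'
# tiny-random checkpoint, so the generated text itself is meaningless:
#
#     from transformers import pipeline
#     generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#     generator("Something there", do_sample=False)
#     # -> [{'generated_text': ''}]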
| 348 | 1 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __a:
"""simple docstring"""
pass | 30 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
lowerCAmelCase = '''CIDAS/clipseg-rd64-refined'''
lowerCAmelCase = '''image_segmenter'''
lowerCAmelCase = CLIPSegForImageSegmentation
lowerCAmelCase = ['''image''', '''text''']
lowerCAmelCase = ['''image''']
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(self ,['''vision'''] )
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
return self.pre_processor(text=[label] ,images=[image] ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> str:
with torch.no_grad():
UpperCAmelCase_ : Dict = self.model(**_SCREAMING_SNAKE_CASE ).logits
return logits
    def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Dict:
        array = _SCREAMING_SNAKE_CASE.cpu().detach().numpy()
        array[array <= 0] = 0  # zero out the background
        array[array > 0] = 1  # keep only the predicted mask region
        return Image.fromarray((array * 255).astype(np.uint8)) | 30 | 1 |
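# Hypothetical usage sketch (added) for the segmentation tool above. The "image-segmentation"
# task name for load_tool and the image path are assumptions for illustration:
#
#     from PIL import Image
#     from transformers import load_tool
#
#     segmenter = load_tool("image-segmentation")
#     mask = segmenter(image=Image.open("photo.png"), label="cat")  # returns a PIL mask image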
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Tuple ) -> Dict:
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : Union[str, Any] , lowercase__ : Tuple=True ) -> Optional[int]:
'''simple docstring'''
model.train()
lowerCAmelCase__ = model(lowercase__ )
lowerCAmelCase__ = F.mse_loss(lowercase__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowercase__ )
def lowerCAmelCase_ (lowercase__ : Union[str, Any] , lowercase__ : Optional[Any]=False ) -> int:
'''simple docstring'''
set_seed(42 )
lowerCAmelCase__ = RegressionModel()
lowerCAmelCase__ = deepcopy(lowercase__ )
lowerCAmelCase__ = RegressionDataset(length=80 )
lowerCAmelCase__ = DataLoader(lowercase__ , batch_size=16 )
model.to(accelerator.device )
if sched:
lowerCAmelCase__ = AdamW(params=model.parameters() , lr=1e-3 )
lowerCAmelCase__ = AdamW(params=ddp_model.parameters() , lr=1e-3 )
lowerCAmelCase__ = LambdaLR(lowercase__ , lr_lambda=lambda lowercase__ : epoch**0.65 )
lowerCAmelCase__ = LambdaLR(lowercase__ , lr_lambda=lambda lowercase__ : epoch**0.65 )
# Make a copy of `model`
if sched:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
else:
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(lowercase__ , lowercase__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowerCAmelCase_ (lowercase__ : str ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = get_training_setup(lowercase__ )
# Use a single batch
lowerCAmelCase__ , lowerCAmelCase__ = next(iter(lowercase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather((ddp_input, ddp_target) )
lowerCAmelCase__ , lowerCAmelCase__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase__ ):
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
else:
# Sync grads
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
lowerCAmelCase__ = ddp_input[torch.randperm(len(lowercase__ ) )]
def lowerCAmelCase_ (lowercase__ : Optional[Any] ) -> int:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = get_training_setup(lowercase__ )
# Use a single batch
lowerCAmelCase__ , lowerCAmelCase__ = next(iter(lowercase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather((ddp_input, ddp_target) )
lowerCAmelCase__ , lowerCAmelCase__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase__ ):
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
else:
# Sync grads
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
lowerCAmelCase__ = ddp_input[torch.randperm(len(lowercase__ ) )]
def lowerCAmelCase_ (lowercase__ : Union[str, Any]=False , lowercase__ : List[str]=False ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = Accelerator(
split_batches=lowercase__ , dispatch_batches=lowercase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = get_training_setup(lowercase__ )
for iteration, batch in enumerate(lowercase__ ):
lowerCAmelCase__ , lowerCAmelCase__ = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather((ddp_input, ddp_target) )
lowerCAmelCase__ , lowerCAmelCase__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowercase__ ):
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
lowerCAmelCase__ = ddp_input[torch.randperm(len(lowercase__ ) )]
GradientState._reset_state()
def lowerCAmelCase_ (lowercase__ : List[Any]=False , lowercase__ : Optional[int]=False ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = Accelerator(
split_batches=lowercase__ , dispatch_batches=lowercase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = get_training_setup(lowercase__ , lowercase__ )
for iteration, batch in enumerate(lowercase__ ):
lowerCAmelCase__ , lowerCAmelCase__ = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather((ddp_input, ddp_target) )
lowerCAmelCase__ , lowerCAmelCase__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowercase__ ):
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
lowerCAmelCase__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase__ ))
if accelerator.num_processes > 1:
check_model_parameters(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def lowerCAmelCase_ () -> Any:
'''simple docstring'''
lowerCAmelCase__ = Accelerator()
lowerCAmelCase__ = RegressionDataset(length=80 )
lowerCAmelCase__ = DataLoader(lowercase__ , batch_size=16 )
lowerCAmelCase__ = RegressionDataset(length=96 )
lowerCAmelCase__ = DataLoader(lowercase__ , batch_size=16 )
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(lowercase__ , lowercase__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowercase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase__ )
if iteration < len(lowercase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowercase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase__ )
if batch_num < len(lowercase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowerCAmelCase_ () -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = Accelerator()
lowerCAmelCase__ = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
test_noop_sync(lowercase__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
test_distributed_sync(lowercase__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(lowercase__ , lowercase__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(lowercase__ , lowercase__ )
def lowerCAmelCase_ (lowercase__ : Tuple ) -> Any:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
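# Minimal sketch (added) of the accumulation pattern these tests verify; `compute_loss` is a
# hypothetical helper, everything else mirrors the Accelerate API used above:
#
#     accelerator = Accelerator(gradient_accumulation_steps=2)
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch in dataloader:
#         with accelerator.accumulate(model):  # gradients only sync every 2nd step
#             loss = compute_loss(model, batch)
#             accelerator.backward(loss)
#             optimizer.step()
#             optimizer.zero_grad()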
| 715 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 288 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f'{split}_results.json')
    if os.path.exists(path):
        with open(path, 'r') as f:
            return json.load(f)
    raise ValueError(f'can\'t find {path}')
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: str= self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__: Tuple= f'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(lowerCAmelCase , '''argv''' , lowerCAmelCase ):
run_flax_glue.main()
SCREAMING_SNAKE_CASE__: List[str]= get_results(lowerCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Dict= self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__: Dict= f'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(lowerCAmelCase , '''argv''' , lowerCAmelCase ):
run_clm_flax.main()
SCREAMING_SNAKE_CASE__: Optional[Any]= get_results(lowerCAmelCase )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__: Tuple= f'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
with patch.object(lowerCAmelCase , '''argv''' , lowerCAmelCase ):
run_summarization_flax.main()
SCREAMING_SNAKE_CASE__: Optional[int]= get_results(lowerCAmelCase , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: str= self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__: Union[str, Any]= f'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
with patch.object(lowerCAmelCase , '''argv''' , lowerCAmelCase ):
run_mlm_flax.main()
SCREAMING_SNAKE_CASE__: List[Any]= get_results(lowerCAmelCase )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Dict= self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__: Optional[Any]= f'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(lowerCAmelCase , '''argv''' , lowerCAmelCase ):
run_ta_mlm_flax.main()
SCREAMING_SNAKE_CASE__: Optional[int]= get_results(lowerCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def UpperCamelCase_ ( self ) -> List[str]:
# With so little data, distributed training needs more epochs to reach a score on par with 0/1 GPU runs.
SCREAMING_SNAKE_CASE__: Union[str, Any]= 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE__: Optional[int]= self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__: int= f'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
with patch.object(lowerCAmelCase , '''argv''' , lowerCAmelCase ):
run_flax_ner.main()
SCREAMING_SNAKE_CASE__: List[Any]= get_results(lowerCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: Optional[int]= self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__: str= f'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
with patch.object(lowerCAmelCase , '''argv''' , lowerCAmelCase ):
run_qa.main()
SCREAMING_SNAKE_CASE__: Optional[int]= get_results(lowerCAmelCase )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
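# Note (added): get_results() above relies on each example script writing its metrics to
# {output_dir}/{split}_results.json. A minimal eval_results.json that satisfies the first
# assertion would be (illustrative values):
#
#     {"eval_accuracy": 0.80}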
| 64 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"feature request",
"wip",
]
def main() -> None:
    '''simple docstring'''
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/accelerate')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state='closed')
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.')
if __name__ == "__main__":
main()
| 313 | 0 |
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
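# Note (added): all three functions compute the classic rod-cutting recurrence
#
#     r(0) = 0,   r(n) = max_{1 <= i <= n} (prices[i - 1] + r(n - i)),
#
# the naive version in exponential time and both DP versions in O(n^2).
# Quick sanity check with the textbook price table (illustrative values):
#
#     >>> bottom_up_cut_rod(4, [1, 5, 8, 9])
#     10  # best cut: two pieces of length 2, revenue 5 + 5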
| 706 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class ImageGPTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sequence (sos) token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
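# Minimal usage sketch (added): the defaults above mirror the small ImageGPT variant
# (512 clustered pixel values + 1 sos token, 32x32 positions):
#
#     config = ImageGPTConfig()
#     config.n_embd, config.n_layer  # -> (512, 24)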
| 209 | 0 |
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    """simple docstring"""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    """simple docstring"""
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 90 counterclockwise:\n''')
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 180:\n''')
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 270 counterclockwise:\n''')
    print_matrix(rotate_270(matrix))
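# Quick property check (added): rotating 90 degrees counterclockwise four times returns the
# original matrix, since every helper above builds new rows instead of mutating its input:
#
#     m = make_matrix(3)
#     assert rotate_90(rotate_90(rotate_90(rotate_90(m)))) == m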
| 395 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 395 | 1 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __snake_case ( _lowerCAmelCase ):
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = 5
# Realm tok
SCREAMING_SNAKE_CASE_ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'realm_tokenizer')
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase)
SCREAMING_SNAKE_CASE_ = os.path.join(_lowerCAmelCase , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , 'realm_block_records')
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase)
def lowerCAmelCase__ ( self):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer'))
def lowerCAmelCase__ ( self):
shutil.rmtree(self.tmpdirname)
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = RealmConfig(num_block_records=self.num_block_records)
return config
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
})
return dataset
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = np.array(
[
b'This is the first record',
b'This is the second record',
b'This is the third record',
b'This is the fourth record',
b'This is the fifth record',
b'This is a longer longer longer record',
] , dtype=_lowerCAmelCase , )
return block_records
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.get_config()
SCREAMING_SNAKE_CASE_ = self.get_dummy_retriever()
SCREAMING_SNAKE_CASE_ = retriever.tokenizer
SCREAMING_SNAKE_CASE_ = np.array([0, 3] , dtype='long')
SCREAMING_SNAKE_CASE_ = tokenizer(['Test question']).input_ids
SCREAMING_SNAKE_CASE_ = tokenizer(
['the fourth'] , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ).input_ids
SCREAMING_SNAKE_CASE_ = config.reader_seq_len
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = retriever(
_lowerCAmelCase , _lowerCAmelCase , answer_ids=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors='np')
self.assertEqual(len(_lowerCAmelCase) , 2)
self.assertEqual(len(_lowerCAmelCase) , 2)
self.assertEqual(len(_lowerCAmelCase) , 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 10))
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10))
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10))
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10))
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.get_config()
SCREAMING_SNAKE_CASE_ = self.get_dummy_retriever()
SCREAMING_SNAKE_CASE_ = retriever.tokenizer
SCREAMING_SNAKE_CASE_ = np.array([0, 3, 5] , dtype='long')
SCREAMING_SNAKE_CASE_ = tokenizer(['Test question']).input_ids
SCREAMING_SNAKE_CASE_ = tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ).input_ids
SCREAMING_SNAKE_CASE_ = config.reader_seq_len
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = retriever(
_lowerCAmelCase , _lowerCAmelCase , answer_ids=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors='np')
self.assertEqual([False, True, True] , _lowerCAmelCase)
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _lowerCAmelCase)
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _lowerCAmelCase)
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
# Test local path
SCREAMING_SNAKE_CASE_ = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
self.assertEqual(retriever.block_records[0] , b'This is the first record')
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download:
SCREAMING_SNAKE_CASE_ = os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records') , _REALM_BLOCK_RECORDS_FILENAME)
SCREAMING_SNAKE_CASE_ = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa')
self.assertEqual(retriever.block_records[0] , b'This is the first record')
| 709 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool('text-question-answering')
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering', remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
| 620 | 0 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowercase__ : Optional[Any] = logging.get_logger(__name__)
lowercase__ : Tuple = {'''vocab_file''': '''spiece.model'''}
lowercase__ : Optional[Any] = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowercase__ : List[Any] = {
'''t5-small''': 5_1_2,
'''t5-base''': 5_1_2,
'''t5-large''': 5_1_2,
'''t5-3b''': 5_1_2,
'''t5-11b''': 5_1_2,
}
lowercase__ : int = '''▁'''
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = VOCAB_FILES_NAMES
UpperCAmelCase_ : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ) ->None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
lowerCAmelCase = [F"<extra_id_{i}>" for i in range(__SCREAMING_SNAKE_CASE )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
lowerCAmelCase = len(set(filter(lambda __SCREAMING_SNAKE_CASE : bool('''extra_id''' in str(__SCREAMING_SNAKE_CASE ) ) , __SCREAMING_SNAKE_CASE ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
F"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
lowerCAmelCase = legacy
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , extra_ids=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , legacy=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = vocab_file
lowerCAmelCase = extra_ids
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
@staticmethod
def SCREAMING_SNAKE_CASE_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[int]:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
lowerCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F" {pretrained_model_name_or_path} automatically truncating your input to"
F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , __SCREAMING_SNAKE_CASE , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text, **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token
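    # Illustrative mapping (assuming the canonical T5 setup of 100 extra ids on top of a
    # 32000-piece SentencePiece model): vocab_size == 32100, "<extra_id_0>" <-> id 32099
    # and "<extra_id_99>" <-> id 32000, per the two conversion methods above.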
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
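# A minimal usage sketch (an assumption: the class above matches transformers' slow
# SentencePiece T5 tokenizer; "t5-small" is only an example checkpoint):
from transformers import T5Tokenizer

t5_tok = T5Tokenizer.from_pretrained("t5-small")
ids = t5_tok("hello world").input_ids
assert ids[-1] == t5_tok.eos_token_id  # eos is appended by build_inputs_with_special_tokens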
| 312 | import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]
class BifidCipher:
    """Bifid cipher over a 5x5 Polybius square ("j" is merged into "i")."""

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the pair of numbers that represents the given letter in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter corresponding to the position [index1, index2] in the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Return the encoded version of message according to the Bifid cipher."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Return the decoded version of message according to the Bifid cipher."""
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index] = numbers[0]
            first_step[letter_index + len(message)] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
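# Quick round-trip sanity check for the BifidCipher above (spaces are stripped and "j"
# is folded into "i" before encoding, so pick a message accordingly):
bifid = BifidCipher()
assert bifid.decode(bifid.encode("testmessage")) == "testmessage"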
| 312 | 1 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing (linear probing)."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        # Store the item if the bucket is empty or already holds the same key;
        # otherwise report a collision so probing can continue.
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
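# Minimal usage sketch for the open-addressing HashMap above:
hm = HashMap(initial_block_size=8)
hm["alpha"] = 1
hm["beta"] = 2
del hm["alpha"]  # the slot is tombstoned with _deleted, not emptied
assert len(hm) == 1 and hm["beta"] == 2 and list(hm) == ["beta"]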
| 608 |
def gnome_sort(lst: list) -> list:
    """Sort lst in place by repeatedly swapping out-of-order neighbours and stepping back."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst
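# Quick sanity check (illustrative); gnome sort runs in O(n^2) in the worst case:
assert gnome_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]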
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(gnome_sort(unsorted))
| 608 | 1 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
    def test_trainer_tpu(self):
        import xla_spawn

        testargs = '''
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
'''.split()
        with patch.object(sys, "argv", testargs):
xla_spawn.main()
| 74 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    """Recursively print a (possibly nested) state dict for inspection."""
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permute the layout of a fused QKV parameter into the ordering transformers expects."""
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
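# Shape sketch with illustrative numbers (num_heads=2, num_splits=3, hidden_size=4, a
# param of shape [24, D]): version 1.0 checkpoints lay the first axis out as
# (num_heads, hidden_size, num_splits), version >= 2.0 as (num_heads, num_splits,
# hidden_size); both are permuted to (num_splits, num_heads, hidden_size) and flattened
# back to the original shape, which is the ordering GPT-2's c_attn expects.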
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)"
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model."
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
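# Illustrative invocation (paths are placeholders):
#
#   PYTHONPATH=/path/to/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure /path/to/checkpoint.zip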
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 350 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
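# Illustrative invocation (file names are placeholders):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin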
| 16 |
"""simple docstring"""
from collections.abc import Callable
class Heap:
    """A generic Heap class, can be used as min or max by passing the key function accordingly."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        return self.arr[0] if self.size else None

    def extract_top(self):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
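# Minimal usage sketch for the generic Heap above: under the default key the largest
# value stays on top (pass key=lambda x: -x for min-heap behaviour).
h = Heap()
h.insert_item(5, 34)
h.insert_item(6, 31)
h.insert_item(7, 37)
assert h.get_top() == [7, 37]
assert h.extract_top() == [7, 37]
assert h.get_top() == [5, 34]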
| 16 | 1 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 81 |
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
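# Worked example (illustrative numbers): with electron_conc and mobility known, the
# missing conductivity is mobility * electron_conc * ELECTRON_CHARGE, e.g.
# carrier_concentration(conductivity=0, electron_conc=25, mobility=100)
# returns ('conductivity', 4.00525e-16).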
| 661 | 0 |
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    """Calculate NAND of the input values."""
    return int((input_1, input_2).count(0) != 0)
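# Truth table for the gate above: NAND(a, b) is 0 only when both inputs are 1:
#   (0, 0) -> 1, (0, 1) -> 1, (1, 0) -> 1, (1, 1) -> 0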
def test_nand_gate() -> None:
    """Tests the nand_gate function."""
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1)) | 720 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_timm_backbone'''] = ['''TimmBackbone''']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 61 | 0 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = get_tests_dir("fixtures/test_sentencepiece.model")
lowercase__ = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
lowercase__ = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 581 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting the characters in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two parent strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
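# e.g. crossover("ABCDE", "fghij") with random_slice == 2 yields ("ABhij", "fgCDE").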
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the gene list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child_list) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new (mutated) children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 1_0 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
            if len(population) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
| 581 | 1 |
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph contains a cycle."""
    # Keep track of visited nodes.
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack.
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
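# Illustrative checks: the edge 2 -> 0 closes a cycle in the first graph, while the
# second graph is acyclic.
assert check_cycle(graph={0: [1], 1: [2], 2: [0]}) is True
assert check_cycle(graph={0: [1], 1: [2], 2: []}) is False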
| 705 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = KandinskyImgaImgPipeline
a = ["prompt", "image_embeds", "negative_image_embeds", "image"]
a = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
a = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
a = False
@property
def lowercase_ ( self : str ) -> List[str]:
return 32
@property
def lowercase_ ( self : Optional[int] ) -> int:
return 32
@property
def lowercase_ ( self : Union[str, Any] ) -> int:
return self.time_input_dim
@property
def lowercase_ ( self : List[str] ) -> int:
return self.time_input_dim * 4
@property
def lowercase_ ( self : Union[str, Any] ) -> Any:
return 100
@property
def lowercase_ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def lowercase_ ( self : List[Any] ) -> List[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
SCREAMING_SNAKE_CASE__ = MultilingualCLIP(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = text_encoder.eval()
return text_encoder
@property
def lowercase_ ( self : str ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(**__lowerCamelCase )
return model
@property
def lowercase_ ( self : Dict ) -> Optional[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self : Tuple ) -> Optional[int]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ = self.dummy_unet
SCREAMING_SNAKE_CASE__ = self.dummy_movq
SCREAMING_SNAKE_CASE__ = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE__ = DDIMScheduler(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any]=0 ) -> str:
SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
if str(__lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def lowercase_ ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ = '''cpu'''
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Tuple ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE__ = '''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE__ = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = pipe_prior(
__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE__ = pipeline(
__lowerCamelCase , image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
| 472 | 0 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
def __A ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : Any ):
A_ = ViTModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] ):
A_ = ViTForMaskedImageModeling(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
A_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A_ = 1
A_ = ViTForMaskedImageModeling(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
A_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __A ( self : str , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Any ):
A_ = self.type_sequence_label_size
A_ = ViTForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
A_ = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ = 1
A_ = ViTForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
A_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self : Optional[Any] ):
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)
        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values) | 86 |
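# Hedged usage sketch mirroring the integration test above (illustrative only):
#
#   from transformers import ViTForImageClassification, ViTImageProcessor
#   from PIL import Image
#
#   processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])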
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(
            dict(list(MODEL_FOR_OBJECT_DETECTION_MAPPING.items()) + list(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox | 86 | 1 |
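# Hedged usage sketch: in practice the pipeline factory wires this class up.
#
#   from transformers import pipeline
#
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   print(detector("cats.png", threshold=0.9))
#   # [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]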
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape) | 302 |
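# Hedged usage sketch for TFLayoutLM (bounding boxes are on a 0-1000 scale;
# the dummy-box construction below is illustrative, not from the test above):
#
#   import tensorflow as tf
#   from transformers import LayoutLMTokenizer, TFLayoutLMModel
#
#   tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
#   model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
#   encoding = tokenizer("hello world", return_tensors="tf")
#   bbox = tf.zeros((1, encoding["input_ids"].shape[1], 4), dtype=tf.int32)  # one box per token
#   outputs = model(encoding["input_ids"], bbox=bbox)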
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Adjacency list for a 0-1 graph, i.e. every edge weight is either 0 or 1."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        # iterate over the outgoing edges of the given vertex
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        # 0-1 BFS: 0-weight edges go to the front of the deque, 1-weight edges to the back
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod() | 302 | 1 |
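# Hedged usage sketch for the 0-1 BFS above (`add_edge` and `get_shortest_path`
# follow the reconstruction in this file):
if __name__ == "__main__":
    g = AdjacencyList(4)
    g.add_edge(0, 1, 0)  # free edge
    g.add_edge(1, 2, 1)
    g.add_edge(0, 3, 1)
    g.add_edge(3, 2, 0)
    assert g.get_shortest_path(0, 2) == 1  # 0 -> 1 (0) -> 2 (1), or 0 -> 3 (1) -> 2 (0)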
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    # Decode raw audio bytes with ffmpeg into a mono float32 numpy array.
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 14 |
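# Hedged usage sketch for `chunk_bytes_iter`: feeding a toy byte stream and
# collecting overlapping chunks. The values are illustrative only.
if __name__ == "__main__":
    fake_stream = iter([b"abcdefgh", b"ijkl"])
    for chunk in chunk_bytes_iter(fake_stream, chunk_len=6, stride=(1, 1), stream=False):
        print(chunk["raw"], chunk["stride"])
    # b'abcdef' (0, 1) / b'efghij' (1, 1) / b'ijkl' (1, 0)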
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 14 | 1 |
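# Hedged usage sketch for TvltProcessor (shapes follow the tests above):
#
#   processor = TvltProcessor(TvltImageProcessor(), TvltFeatureExtractor())
#   inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
#   # -> dict with audio_values, audio_mask, pixel_values, pixel_mask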
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 495 |
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
| 495 | 1 |
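# Worked check for `digit_factorial_sum` (follows directly from the definition):
# >>> digit_factorial_sum(145)
# 145    # 1! + 4! + 5! = 1 + 24 + 120 = 145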
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module) -> None:  # name assumed; the original identifier is not recoverable
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:  # name assumed
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:  # name assumed
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:  # name assumed
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
| 220 |
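# Hedged usage sketch for the helpers above (helper names are assumed):
if __name__ == "__main__":
    layer = torch.nn.Linear(4, 2)
    freeze_params(layer)
    assert all(not p.requires_grad for p in layer.parameters())
    print(get_device(), get_timestamp())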
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 565 | 0 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):  # function name assumed; parameter names follow the original body
    # Greedy fractional knapsack: sort by value density, take whole items while
    # they fit, then a fraction of the next one.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod() | 113 |
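# Worked example (classic instance): frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
# sorts densities 6 > 5 > 4, takes the first two items (value 160, weight 30) and
# 20/30 of the third, giving 160 + 20 * 120 / 30 = 240.0.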
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f"""{solution() = }""") | 113 | 1 |
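# Sanity check from the Project Euler 43 statement: 1406357289 has the
# required substring divisibility property.
# >>> is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))
# True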
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
    title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
    and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
    Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
    Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
    Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
    and Jeffrey Dean},
    year={2016},
    eprint={1609.08144},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...     'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...     'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...     'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...     'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...     'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...     'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...     'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...     'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...     'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...     'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...     'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...     'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...     'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...     'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...     'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...     'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.61

    Example 3:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...     'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...     'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...     'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...     'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...     'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...     'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...     'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...     'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...     'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...     'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results["google_bleu"], 2))
        0.53

    Example 4:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...     'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...     'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...     'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...     'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...     'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...     'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...     'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...     'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...     'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...     'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results["google_bleu"], 2))
        0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(self, predictions, references, min_len=1, max_len=4) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 361 |
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence, start=None, end=None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 | 0 |
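# Hedged usage sketch (slowsort sorts in place):
if __name__ == "__main__":
    data = [5, 2, 4, 6, 1, 3]
    slowsort(data)
    assert data == [1, 2, 3, 4, 5, 6]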
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images
        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
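# Hedged usage sketch (checkpoint name assumed; OCR requires Tesseract):
#
#   from transformers import LayoutXLMProcessor
#   from PIL import Image
#
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
#   print(encoding.keys())  # input_ids, bbox, attention_mask, image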
| 477 | """simple docstring"""
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:  # name assumed
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:  # name assumed
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
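# Quick round-trip check (added sketch): the functions above should agree with the
# stdlib `base64` module on bytes input; the names follow the rewrite above.
import base64

data = b"Hello, Base64!"
assert base64_encode(data) == base64.b64encode(data)  # same alphabet and padding rules
assert base64_decode(base64_encode(data)) == data     # decoding inverts encoding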
| 477 | 1 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
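# Hedged usage sketch (added): with a hubconf like this one, the entry points are
# reachable through torch.hub. The repo slug and checkpoint id are illustrative.
import torch

bert = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")
bert_tokenizer = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")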
| 385 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into a unit lower
    triangular matrix and an upper triangular matrix (no pivoting)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
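# Worked check (added sketch): for a Doolittle factorization, L is unit lower
# triangular, U is upper triangular, and L @ U reconstructs the input. Unlike
# scipy.linalg.lu, no pivoting is done here, so a zero pivot raises ArithmeticError.
matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = lower_upper_decomposition(matrix)
assert np.allclose(lower @ upper, matrix)
assert np.allclose(np.diag(lower), 1.0)  # unit diagonal on L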
| 385 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class _UpperCAmelCase ( lowercase ):
lowerCamelCase_ : Optional[Any] = ["pixel_values"]
def __init__( self : Dict , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Dict[str, int]] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[int, float] = 1 / 2_55 , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , **UpperCAmelCase : str , ):
super().__init__(**UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Dict = size if size is not None else {"shortest_edge": 2_56}
SCREAMING_SNAKE_CASE_ :Any = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Dict = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE_ :Optional[Any] = get_size_dict(UpperCAmelCase , param_name="crop_size")
SCREAMING_SNAKE_CASE_ :List[Any] = do_resize
SCREAMING_SNAKE_CASE_ :Optional[Any] = size
SCREAMING_SNAKE_CASE_ :Optional[int] = resample
SCREAMING_SNAKE_CASE_ :List[str] = do_center_crop
SCREAMING_SNAKE_CASE_ :int = crop_size
SCREAMING_SNAKE_CASE_ :Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE_ :List[Any] = rescale_factor
SCREAMING_SNAKE_CASE_ :Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE_ :Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE_ :Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _snake_case ( self : str , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Tuple , ):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
SCREAMING_SNAKE_CASE_ :Union[str, Any] = get_resize_output_image_size(UpperCAmelCase , size=size["shortest_edge"] , default_to_square=UpperCAmelCase)
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase)
def _snake_case ( self : Optional[int] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[int] , ):
SCREAMING_SNAKE_CASE_ :Optional[int] = get_size_dict(UpperCAmelCase)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(UpperCAmelCase , size=(size["height"], size["width"]) , data_format=UpperCAmelCase , **UpperCAmelCase)
def _snake_case ( self : Any , UpperCAmelCase : np.ndarray , UpperCAmelCase : float , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any]):
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase)
def _snake_case ( self : List[str] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Union[str, Any] , ):
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase)
def _snake_case ( self : Any , UpperCAmelCase : ImageInput , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[float] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase : List[Any] , ):
SCREAMING_SNAKE_CASE_ :Optional[int] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ :Any = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ :Union[str, Any] = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :List[Any] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ :Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ :Optional[Any] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ :List[str] = get_size_dict(UpperCAmelCase , param_name="crop_size")
SCREAMING_SNAKE_CASE_ :Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ :Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ :Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ :Optional[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ :str = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ :List[Any] = make_list_of_images(UpperCAmelCase)
if not valid_images(UpperCAmelCase):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ :List[str] = [to_numpy_array(UpperCAmelCase) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ :int = [self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ :Optional[int] = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ :Any = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase) for image in images]
SCREAMING_SNAKE_CASE_ :Tuple = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase) for image in images]
SCREAMING_SNAKE_CASE_ :List[Any] = {"pixel_values": images}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase)
def _snake_case ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Tuple] = None):
SCREAMING_SNAKE_CASE_ :str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase) != len(UpperCAmelCase):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits")
if is_torch_tensor(UpperCAmelCase):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE_ :str = []
for idx in range(len(UpperCAmelCase)):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode="bilinear" , align_corners=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Any = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(UpperCAmelCase)
else:
SCREAMING_SNAKE_CASE_ :int = logits.argmax(dim=1)
SCREAMING_SNAKE_CASE_ :Optional[int] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation | 706 |
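# Hedged usage sketch (added) for the image processor above; the instance name is
# hypothetical, and the shapes follow the defaults in __init__ (resize the
# shortest edge to 256, then center crop to 224x224).
import numpy as np

dummy = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # HWC uint8 image
# processor = <the image processor class defined above>()
# batch = processor(images=dummy, return_tensors="pt")
# batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])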
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    """Build a k_size x k_size Gaussian kernel centered on the middle cell."""
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col: turn each k_size*k_size window into a row and stack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k,)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # convolve via a single matrix product, then reshape into the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image into grayscale values
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # filter with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
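# Design note (added sketch): the kernel above is not normalized, so overall
# brightness drifts with sigma and k_size; dividing by the weight sum makes the
# filter a true weighted average that keeps uint8 outputs in range.
normalized = gen_gaussian_kernel(3, sigma=1)
normalized = normalized / normalized.sum()  # weights now sum to 1
assert abs(float(normalized.sum()) - 1.0) < 1e-9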
| 140 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __magic_name__ ( __snake_case ,unittest.TestCase ):
UpperCamelCase : Optional[int] = DDIMPipeline
UpperCamelCase : Union[str, Any] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
UpperCamelCase : List[Any] = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
UpperCamelCase : Union[str, Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
UpperCamelCase : List[Any] = False
def _lowerCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCAmelCase = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = {'unet': unet, 'scheduler': scheduler}
return components
def _lowerCamelCase ( self , __magic_name__ , __magic_name__=0 ):
"""simple docstring"""
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
_lowerCAmelCase = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'cpu'
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 3_2, 3_2, 3) )
_lowerCAmelCase = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 )
def _lowerCamelCase ( self ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def _lowerCamelCase ( self ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3e-3 )
def _lowerCamelCase ( self ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def _lowerCamelCase ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'google/ddpm-cifar10-32'
_lowerCAmelCase = UNetaDModel.from_pretrained(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = DDIMPipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
ddim.to(_SCREAMING_SNAKE_CASE )
ddim.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = ddim(generator=_SCREAMING_SNAKE_CASE , eta=0.0 , output_type='numpy' ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCAmelCase = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'google/ddpm-ema-bedroom-256'
_lowerCAmelCase = UNetaDModel.from_pretrained(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = DDIMScheduler.from_pretrained(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = DDIMPipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
ddpm.to(_SCREAMING_SNAKE_CASE )
ddpm.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = ddpm(generator=_SCREAMING_SNAKE_CASE , output_type='numpy' ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
_lowerCAmelCase = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
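# Hedged end-to-end sketch (added), mirroring the slow test above: assemble the
# pipeline from its parts and sample one image. The checkpoint id comes from the
# test; the default number of inference steps is used for brevity.
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images[0]
print(image.shape)  # (32, 32, 3)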
| 589 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__magic_name__ : Any = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 12_8022
FR_CODE = 12_8028
@require_sentencepiece
class A__ ( __snake_case , unittest.TestCase ):
'''simple docstring'''
snake_case__ = MaMaaaTokenizer
snake_case__ = False
snake_case__ = False
snake_case__ = True
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
super().setUp()
UpperCamelCase = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
UpperCamelCase = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCamelCase = Path(self.tmpdirname )
save_json(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['spm_file'] )
UpperCamelCase = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self : str , **_SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = '</s>'
UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<s>' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('Skip this test while all models are still to be uploaded.' )
def _SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [2, 3, 4, 5, 6] , )
UpperCamelCase = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['▁This', '▁is', '▁a', '▁t', 'est'] )
UpperCamelCase = tokenizer.convert_tokens_to_string(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , 'This is a test' )
@slow
def _SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
UpperCamelCase = {'input_ids': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='facebook/m2m100_418M' , revision='c168bae485c864188cf9aa0e4108b0b6934dc91e' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
'''simple docstring'''
snake_case__ = """facebook/m2m100_418M"""
snake_case__ = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
snake_case__ = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
snake_case__ = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ):
"""simple docstring"""
        cls.tokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
def _SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('ar' ) , 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id('en' ) , 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id('ro' ) , 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id('mr' ) , 12_8063 )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.tokenizer.get_vocab()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['<unk>'] , 3 )
self.assertIn(self.tokenizer.get_lang_token('en' ) , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = 'en'
UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertIn(_SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
# fmt: off
UpperCamelCase = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
# fmt: on
UpperCamelCase = self.tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = MaMaaaTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.lang_token_to_id , _SCREAMING_SNAKE_CASE )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = 'en'
UpperCamelCase = 'fr'
UpperCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
UpperCamelCase = shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
UpperCamelCase = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
UpperCamelCase = 'mr'
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
UpperCamelCase = 'zh'
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = 'mr'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
UpperCamelCase = 'zh'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.tokenizer._build_translation_inputs('A test' , return_tensors='pt' , src_lang='en' , tgt_lang='ar' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , {
# en_XX, A, test, EOS
'input_ids': [[12_8022, 58, 4183, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 12_8006,
} , )
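# Hedged translation sketch (added) using the checkpoint exercised by the tests
# above; M2M100Tokenizer / M2M100ForConditionalGeneration are the upstream names
# of the classes under test, and the generation settings are illustrative.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

inputs = tokenizer("Hello, how are you?", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))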
| 280 | 0 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Apply the logistic sigmoid 1 / (1 + exp(-x)) element-wise."""
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
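# Worked example (added): the sigmoid squashes any real input into (0, 1) and
# maps 0 to exactly 0.5.
print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # -> [0.26894142 0.5 0.73105858]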
| 487 |
"""simple docstring"""
def min_path_sum(grid: list) -> int:
    """Find the cheapest path from the top-left to the bottom-right of `grid`,
    moving only right or down. The grid is updated in place with running sums."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
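# Worked example (added): the cheapest right/down path through this grid is
# 1 -> 3 -> 1 -> 1 -> 1, for a total of 7. Note the function mutates its input.
example = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
assert min_path_sum(example) == 7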
| 487 | 1 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
if model_type not in MODEL_CLASSES:
raise ValueError(f"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" )
    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
snake_case__ =cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models )
snake_case__ =config_class.from_json_file(SCREAMING_SNAKE_CASE_ )
snake_case__ =True
snake_case__ =True
print(f"""Building TensorFlow model from configuration: {config}""" )
snake_case__ =model_class(SCREAMING_SNAKE_CASE_ )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
snake_case__ =cached_file(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
snake_case__ =load_pytorch_checkpoint_in_tfa_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if compare_with_pt_model:
snake_case__ =tf_model(tf_model.dummy_inputs , training=SCREAMING_SNAKE_CASE_ ) # build the network
snake_case__ =torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' )
snake_case__ =pt_model_class.from_pretrained(
pretrained_model_name_or_path=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ , state_dict=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
snake_case__ =pt_model(**pt_model.dummy_inputs )
snake_case__ =pto[0].numpy()
snake_case__ =tfo[0].numpy()
snake_case__ =np.amax(np.abs(np_pt - np_tf ) )
print(f"""Max absolute difference between models outputs {diff}""" )
assert diff <= 2e-2, f"""Error, model absolute difference is >2e-2: {diff}"""
# Save pytorch-model
print(f"""Save TensorFlow model to {tf_dump_path}""" )
tf_model.save_weights(SCREAMING_SNAKE_CASE_ , save_format='h5' )
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
if args_model_type is None:
snake_case__ =list(MODEL_CLASSES.keys() )
else:
snake_case__ =[args_model_type]
for j, model_type in enumerate(SCREAMING_SNAKE_CASE_ , start=1 ):
print('=' * 100 )
print(f""" Converting model type {j}/{len(SCREAMING_SNAKE_CASE_ )}: {model_type}""" )
print('=' * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" )
        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
snake_case__ =list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
snake_case__ =model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , start=1 ):
print('-' * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f""" Skipping finetuned checkpoint {model_shortcut_name}""" )
continue
snake_case__ =model_shortcut_name
elif only_convert_finetuned_models:
print(f""" Skipping not finetuned checkpoint {model_shortcut_name}""" )
continue
print(
f""" Converting checkpoint {i}/{len(SCREAMING_SNAKE_CASE_ )}: {model_shortcut_name} - model_type {model_type}""" )
print('-' * 100 )
if config_shortcut_name in aws_config_map:
snake_case__ =cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models )
else:
snake_case__ =config_shortcut_name
if model_shortcut_name in aws_model_maps:
snake_case__ =cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models )
else:
snake_case__ =model_shortcut_name
if os.path.isfile(SCREAMING_SNAKE_CASE_ ):
snake_case__ ='converted_model'
convert_pt_checkpoint_to_tf(
model_type=SCREAMING_SNAKE_CASE_ , pytorch_checkpoint_path=SCREAMING_SNAKE_CASE_ , config_file=SCREAMING_SNAKE_CASE_ , tf_dump_path=os.path.join(SCREAMING_SNAKE_CASE_ , model_shortcut_name + '-tf_model.h5' ) , compare_with_pt_model=SCREAMING_SNAKE_CASE_ , )
if remove_cached_files:
os.remove(SCREAMING_SNAKE_CASE_ )
os.remove(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
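# Hedged CLI sketch (added): a typical single-checkpoint invocation. The flags
# come from the argparse definitions above; the script file name and paths are
# illustrative assumptions.
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path ./bert/pytorch_model.bin \
#       --config_file ./bert/config.json \
#       --tf_dump_path ./bert-tf \
#       --compare_with_pt_model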
| 538 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polynomial() -> None:
    """Plot the data points against the degree-4 polynomial fit."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Polynomial Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003 | 287 | 0 |
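# Added sketch: contrast the degree-4 polynomial model with a plain linear
# baseline on the same query point (exact numbers depend on the CSV loaded above).
lin_reg = LinearRegression().fit(X, y)
print(lin_reg.predict([[5.5]]))                          # linear baseline
print(pol_reg.predict(poly_reg.fit_transform([[5.5]])))  # polynomial model, ~132148.44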
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
    )
else:
    # Loading a dataset from your local files.
    # CSV/JSON training and evaluation files are needed.
    data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
    # Get the test dataset: you can provide your own CSV/JSON test file (see below)
    # when you use `do_predict` without specifying a GLUE benchmark task.
    if training_args.do_predict:
        if data_args.test_file is not None:
            train_extension = data_args.train_file.split(".")[-1]
            test_extension = data_args.test_file.split(".")[-1]
            assert (
                test_extension == train_extension
            ), "`test_file` should have the same extension (csv or json) as `train_file`."
            data_files["test"] = data_args.test_file
        else:
            raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
    for key in data_files.keys():
        logger.info(f"load a local file for {key}: {data_files[key]}")
    if data_args.train_file.endswith(".csv"):
        # Loading a dataset from local csv files
        raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
    else:
        # Loading a dataset from local json files
        raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
label_list = raw_datasets["train"].features["label"].names
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
    model_args.config_name if model_args.config_name else model_args.model_name_or_path,
    num_labels=num_labels,
    cache_dir=model_args.cache_dir,
    revision=model_args.model_revision,
    use_auth_token=True if model_args.use_auth_token else None,
)
# load tapex tokenizer
tokenizer = TapexTokenizer.from_pretrained(
    model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
    cache_dir=model_args.cache_dir,
    use_fast=model_args.use_fast_tokenizer,
    revision=model_args.model_revision,
    use_auth_token=True if model_args.use_auth_token else None,
    add_prefix_space=True,
)
model = BartForSequenceClassification.from_pretrained(
    model_args.model_name_or_path,
    from_tf=bool(".ckpt" in model_args.model_name_or_path),
    config=config,
    cache_dir=model_args.cache_dir,
    revision=model_args.model_revision,
    use_auth_token=True if model_args.use_auth_token else None,
)
# Padding strategy
if data_args.pad_to_max_length:
    padding = "max_length"
else:
    # We will pad later, dynamically at batch creation, to the max sequence length in each batch
    padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
model.config.label2id = {"Refused": 0, "Entailed": 1}
model.config.id2label = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_tabfact_function(examples):
    # Tokenize the texts
    def _convert_table_text_to_pandas(_table_text):
        # Each TabFact table arrives as one string; rows are newline-separated,
        # cells are "#"-separated, and the first row holds the column headers.
        _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
        _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
        return _table_pd

    statement = examples["statement"]
    table = list(map(_convert_table_text_to_pandas, examples["table_text"]))
    result = tokenizer(table, statement, padding=padding, max_length=max_seq_length, truncation=True)
    result["label"] = examples["label"]
    return result
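
# Illustrative sketch (not executed): a TabFact `table_text` value such as
#     "header_a#header_b\n1#2\n3#4"
# is converted by the helper above into
#     pd.DataFrame.from_records([["1", "2"], ["3", "4"]], columns=["header_a", "header_b"])
# before the table/statement pair is handed to the TAPEX tokenizer.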
with training_args.main_process_first(desc="dataset map pre-processing"):
    raw_datasets = raw_datasets.map(
        preprocess_tabfact_function,
        batched=True,
        load_from_cache_file=not data_args.overwrite_cache,
        desc="Running tokenizer on dataset",
    )
if training_args.do_train:
    if "train" not in raw_datasets:
        raise ValueError("--do_train requires a train dataset")
    train_dataset = raw_datasets["train"]
    if data_args.max_train_samples is not None:
        train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
    if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
        raise ValueError("--do_eval requires a validation dataset")
    eval_dataset = raw_datasets["validation"]
    if data_args.max_eval_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.test_file is not None:
    if "test" not in raw_datasets and "test_matched" not in raw_datasets:
        raise ValueError("--do_predict requires a test dataset")
    predict_dataset = raw_datasets["test"]
    if data_args.max_predict_samples is not None:
        predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p):
    preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
    preds = np.argmax(preds, axis=1)
    return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
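
# Illustrative (not executed): with preds == np.array([1, 0]) and
# p.label_ids == np.array([1, 1]), compute_metrics returns {"accuracy": 0.5}.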
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
    data_collator = default_data_collator
elif training_args.fp16:
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
    data_collator = None
# Initialize our Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset if training_args.do_train else None,
    eval_dataset=eval_dataset if training_args.do_eval else None,
    compute_metrics=compute_metrics,
    tokenizer=tokenizer,
    data_collator=data_collator,
)
# Training
if training_args.do_train:
    checkpoint = None
    if training_args.resume_from_checkpoint is not None:
        checkpoint = training_args.resume_from_checkpoint
    elif last_checkpoint is not None:
        checkpoint = last_checkpoint
    train_result = trainer.train(resume_from_checkpoint=checkpoint)
    metrics = train_result.metrics
    max_train_samples = (
        data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
    )
    metrics["train_samples"] = min(max_train_samples, len(train_dataset))
    trainer.save_model()  # Saves the tokenizer too for easy upload
    trainer.log_metrics("train", metrics)
    trainer.save_metrics("train", metrics)
    trainer.save_state()
# Evaluation
if training_args.do_eval:
    logger.info("*** Evaluate ***")
    metrics = trainer.evaluate(eval_dataset=eval_dataset)
    max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
    metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
    trainer.log_metrics("eval", metrics)
    trainer.save_metrics("eval", metrics)
if training_args.do_predict:
    logger.info("*** Predict ***")
    # Removing the `label` columns because it contains -1 and Trainer won't like that.
    predict_dataset = predict_dataset.remove_columns("label")
    predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
    predictions = np.argmax(predictions, axis=1)
    output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
    if trainer.is_world_process_zero():
        with open(output_predict_file, "w") as writer:
            logger.info("***** Predict Results *****")
            writer.write("index\tprediction\n")
            for index, item in enumerate(predictions):
                item = label_list[item]
                writer.write(f"{index}\t{item}\n")
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if training_args.push_to_hub:
    trainer.push_to_hub(**kwargs)
else:
    trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 717 |
def merge_sort(collection: list) -> list:
    """Sort by repeatedly extracting the minimum and maximum of the collection."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
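
# A quick sanity check of the min/max strategy (illustrative, not executed):
#     merge_sort([5, 3, 1, 4, 2])  ->  [1, 2, 3, 4, 5]
#     merge_sort([])               ->  []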
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 89 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
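
# Minimal usage sketch (illustrative; assumes the standard `transformers` install):
#     config = PegasusConfig(encoder_layers=6, decoder_layers=6)
#     config.hidden_size  # -> 1024, aliased to `d_model` via `attribute_map`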
| 320 |
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
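
# Note (illustrative): with this pattern `import transformers.models.blip` stays
# cheap; only attribute access such as `blip.BlipModel` triggers the torch-backed
# import, because _LazyModule resolves names from `_import_structure` on demand.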
| 209 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding method."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        h = self.encode(sample).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
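
# Minimal round-trip sketch (illustrative; assumes the default config above):
#     model = VQModel()
#     x = torch.randn(1, 3, 32, 32)
#     reconstruction = model(x).sample  # same shape as `x`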
| 209 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
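# Illustrative: `rope_scaling` must look like {"type": <"linear"|"dynamic">, "factor": >1.0}:
#     GPTNeoXConfig(rope_scaling={"type": "dynamic", "factor": 4.0})  # ok
#     GPTNeoXConfig(rope_scaling={"type": "ntk", "factor": 2.0})      # raises ValueError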
| 37 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
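
# Example invocation (illustrative; substitute the actual script filename):
#     python inference_script.py --dpm --steps 20
# enables the DPMSolver scheduler and caps generation at 20 inference steps.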
| 112 | 0 |
import math


def proth(number: int) -> int:
    """Return the nth Proth number."""
    if not isinstance(number, int):
        error_msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(error_msg)

    if number < 1:
        error_msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(error_msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # The sequence grows in blocks whose sizes double with each power of two.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
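
# Illustrative values: proth(1..7) -> 3, 5, 9, 13, 17, 25, 33
# (every Proth number has the form k * 2**n + 1 with odd k < 2**n).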
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
| 4 |
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
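
# Illustrative output (titles and URLs vary with the live feed); each markdown line is
#     * [Story title](https://example.com/story)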
| 4 | 1 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa


set_seed(42)

MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def _snake_case ( self : Any , __lowerCamelCase : List[Any]=False , __lowerCamelCase : int=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , ):
SCREAMING_SNAKE_CASE = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , )
SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history
if not do_eval:
return
SCREAMING_SNAKE_CASE = [log for log in logs if "eval_loss" in log.keys()]
SCREAMING_SNAKE_CASE = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
SCREAMING_SNAKE_CASE = eval_metrics[-1]
assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase )
assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def _snake_case ( self : List[str] ):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def _snake_case ( self : str ):
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@require_torch_multi_gpu
def _snake_case ( self : List[Any] ):
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def _snake_case ( self : Union[str, Any] ):
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def _snake_case ( self : Dict ):
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def _snake_case ( self : str ):
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=__lowerCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def _snake_case ( self : Optional[int] ):
self.run_seqaseq_quick(
distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=__lowerCamelCase )
@require_apex
@require_torch_gpu
def _snake_case ( self : Tuple ):
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
def _snake_case ( self : Any , __lowerCamelCase : str ):
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
SCREAMING_SNAKE_CASE = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
SCREAMING_SNAKE_CASE = experiments[experiment_id]
SCREAMING_SNAKE_CASE = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
SCREAMING_SNAKE_CASE = "Running training"
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__lowerCamelCase , extra_args_str=data["extra_args_str"] )
SCREAMING_SNAKE_CASE = len(re.findall(__lowerCamelCase , cl.err ) )
self.assertEqual(__lowerCamelCase , data["n_matches"] )
@slow
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowerCamelCase , )
# Check metrics
SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history
SCREAMING_SNAKE_CASE = [log for log in logs if "eval_loss" in log.keys()]
SCREAMING_SNAKE_CASE = eval_metrics[0]
SCREAMING_SNAKE_CASE = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase )
# test if do_predict saves generations and metrics
SCREAMING_SNAKE_CASE = os.listdir(__lowerCamelCase )
SCREAMING_SNAKE_CASE = {os.path.basename(__lowerCamelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def _snake_case ( self : List[str] ):
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__lowerCamelCase : str ) -> Tuple[int, float]:
SCREAMING_SNAKE_CASE = "--skip_memory_metrics 0"
SCREAMING_SNAKE_CASE = self.run_trainer(
max_len=128 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCamelCase , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , n_gpus_to_use=1 , )
# Check metrics
SCREAMING_SNAKE_CASE = TrainerState.load_from_json(Path(__lowerCamelCase , "trainer_state.json" ) ).log_history
SCREAMING_SNAKE_CASE = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
SCREAMING_SNAKE_CASE = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
SCREAMING_SNAKE_CASE = logs[0]["train_loss"]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
SCREAMING_SNAKE_CASE = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
SCREAMING_SNAKE_CASE = gpu_peak_mem_orig + gpu_alloc_mem_orig
SCREAMING_SNAKE_CASE = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
SCREAMING_SNAKE_CASE = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
SCREAMING_SNAKE_CASE = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__lowerCamelCase , __lowerCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
__lowerCamelCase , __lowerCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
__lowerCamelCase , __lowerCamelCase , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )
def _snake_case ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : float = 3e-3 , __lowerCamelCase : str = "adafactor" , __lowerCamelCase : bool = False , __lowerCamelCase : str = None , __lowerCamelCase : int = 0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = None , ):
SCREAMING_SNAKE_CASE = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(__lowerCamelCase )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(__lowerCamelCase )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
SCREAMING_SNAKE_CASE = f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(__lowerCamelCase )}\n ".split()
SCREAMING_SNAKE_CASE = "\n --do_predict\n ".split()
SCREAMING_SNAKE_CASE = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
SCREAMING_SNAKE_CASE = get_gpu_count()
SCREAMING_SNAKE_CASE = get_torch_dist_unique_port()
SCREAMING_SNAKE_CASE = f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
SCREAMING_SNAKE_CASE = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCamelCase , env=self.get_env() )
else:
SCREAMING_SNAKE_CASE = ["run_translation.py"] + args
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
main()
return output_dir | 16 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __a ( A__ : str , A__ : List[Any]=None ):
SCREAMING_SNAKE_CASE = None
if token is not None:
SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
SCREAMING_SNAKE_CASE = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ ).json()
SCREAMING_SNAKE_CASE = {}
try:
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
SCREAMING_SNAKE_CASE = math.ceil((result["total_count"] - 100) / 100 )
for i in range(A__ ):
SCREAMING_SNAKE_CASE = requests.get(url + F"&page={i + 2}" , headers=A__ ).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return job_links
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def __a ( A__ : List[Any] , A__ : Optional[int]=None ):
SCREAMING_SNAKE_CASE = None
if token is not None:
SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
SCREAMING_SNAKE_CASE = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ ).json()
SCREAMING_SNAKE_CASE = {}
try:
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
SCREAMING_SNAKE_CASE = math.ceil((result["total_count"] - 100) / 100 )
for i in range(A__ ):
SCREAMING_SNAKE_CASE = requests.get(url + F"&page={i + 2}" , headers=A__ ).json()
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
return artifacts
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def __a ( A__ : Any , A__ : str , A__ : List[str] , A__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE = None
if token is not None:
SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
SCREAMING_SNAKE_CASE = requests.get(A__ , headers=A__ , allow_redirects=A__ )
SCREAMING_SNAKE_CASE = result.headers["Location"]
SCREAMING_SNAKE_CASE = requests.get(A__ , allow_redirects=A__ )
SCREAMING_SNAKE_CASE = os.path.join(A__ , F"{artifact_name}.zip" )
with open(A__ , "wb" ) as fp:
fp.write(response.content )
def __a ( A__ : List[Any] , A__ : List[Any]=None ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = None
with zipfile.ZipFile(A__ ) as z:
for filename in z.namelist():
if not os.path.isdir(A__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(A__ ) as f:
for line in f:
SCREAMING_SNAKE_CASE = line.decode("UTF-8" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
SCREAMING_SNAKE_CASE = line[: line.index(": " )]
SCREAMING_SNAKE_CASE = line[line.index(": " ) + len(": " ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED " ):
# `test` is the test method that failed
SCREAMING_SNAKE_CASE = line[len("FAILED " ) :]
failed_tests.append(A__ )
elif filename == "job_name.txt":
SCREAMING_SNAKE_CASE = line
if len(A__ ) != len(A__ ):
raise ValueError(
F"`errors` and `failed_tests` should have the same number of elements. Got {len(A__ )} for `errors` "
F"and {len(A__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
" problem." )
SCREAMING_SNAKE_CASE = None
if job_name and job_links:
SCREAMING_SNAKE_CASE = job_links.get(A__ , A__ )
# A list with elements of the form (line of error, error, failed test)
SCREAMING_SNAKE_CASE = [x + [y] + [job_link] for x, y in zip(A__ , A__ )]
return result
def __a ( A__ : Union[str, Any] , A__ : Union[str, Any]=None ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = [os.path.join(A__ , A__ ) for p in os.listdir(A__ ) if p.endswith(".zip" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(A__ , job_links=A__ ) )
return errors
def __a ( A__ : List[str] , A__ : Tuple=None ):
SCREAMING_SNAKE_CASE = Counter()
counter.update([x[1] for x in logs] )
SCREAMING_SNAKE_CASE = counter.most_common()
SCREAMING_SNAKE_CASE = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
SCREAMING_SNAKE_CASE = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
SCREAMING_SNAKE_CASE = dict(sorted(r.items() , key=lambda A__ : item[1]["count"] , reverse=A__ ) )
return r
def __a ( A__ : str ):
SCREAMING_SNAKE_CASE = test.split("::" )[0]
if test.startswith("tests/models/" ):
SCREAMING_SNAKE_CASE = test.split("/" )[2]
else:
SCREAMING_SNAKE_CASE = None
return test
def __a ( A__ : List[str] , A__ : Dict=None ):
SCREAMING_SNAKE_CASE = [(x[0], x[1], get_model(x[2] )) for x in logs]
SCREAMING_SNAKE_CASE = [x for x in logs if x[2] is not None]
SCREAMING_SNAKE_CASE = {x[2] for x in logs}
SCREAMING_SNAKE_CASE = {}
for test in tests:
SCREAMING_SNAKE_CASE = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
SCREAMING_SNAKE_CASE = counter.most_common()
SCREAMING_SNAKE_CASE = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
SCREAMING_SNAKE_CASE = sum(error_counts.values() )
if n_errors > 0:
SCREAMING_SNAKE_CASE = {"count": n_errors, "errors": error_counts}
SCREAMING_SNAKE_CASE = dict(sorted(r.items() , key=lambda A__ : item[1]["count"] , reverse=A__ ) )
return r
def __a ( A__ : Dict ):
SCREAMING_SNAKE_CASE = "| no. | error | status |"
SCREAMING_SNAKE_CASE = "|-:|:-|:-|"
SCREAMING_SNAKE_CASE = [header, sep]
for error in reduced_by_error:
SCREAMING_SNAKE_CASE = reduced_by_error[error]["count"]
SCREAMING_SNAKE_CASE = F"| {count} | {error[:100]} | |"
lines.append(A__ )
return "\n".join(A__ )
def __a ( A__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = "| model | no. of errors | major error | count |"
SCREAMING_SNAKE_CASE = "|-:|-:|-:|-:|"
SCREAMING_SNAKE_CASE = [header, sep]
for model in reduced_by_model:
SCREAMING_SNAKE_CASE = reduced_by_model[model]["count"]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = list(reduced_by_model[model]["errors"].items() )[0]
SCREAMING_SNAKE_CASE = F"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(A__ )
return "\n".join(A__ )
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
__A : Union[str, Any] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__A : int = get_job_links(args.workflow_run_id, token=args.token)
__A : Dict = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__A : Union[str, Any] = k.find(' / ')
__A : Optional[int] = k[index + len(' / ') :]
__A : Optional[int] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__A : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__A : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__A : Dict = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__A : Optional[Any] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__A : str = reduce_by_error(errors)
__A : int = reduce_by_model(errors)
__A : Any = make_github_table(reduced_by_error)
__A : List[str] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa) | 16 | 1 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
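
# Illustrative: betas_for_alpha_bar(1000) returns a length-1000 float32 tensor of
# monotonically increasing betas following the squared-cosine schedule.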
class __UpperCamelCase (snake_case__ , snake_case__ ):
@register_to_config
def __init__( self , _lowerCAmelCase = 1000 , _lowerCAmelCase = "fixed_small_log" , _lowerCAmelCase = True , _lowerCAmelCase = 1.0 , _lowerCAmelCase = "epsilon" , _lowerCAmelCase = "squaredcos_cap_v2" , ) -> Optional[int]:
'''simple docstring'''
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'""" )
lowercase = betas_for_alpha_bar(UpperCAmelCase_ )
lowercase = 1.0 - self.betas
lowercase = torch.cumprod(self.alphas , dim=0 )
lowercase = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
lowercase = 1.0
# setable values
lowercase = None
lowercase = torch.from_numpy(np.arange(0 , UpperCAmelCase_ )[::-1].copy() )
lowercase = variance_type
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple:
'''simple docstring'''
lowercase = num_inference_steps
lowercase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
lowercase = (np.arange(0 , UpperCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
lowercase = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> str:
'''simple docstring'''
if prev_timestep is None:
lowercase = t - 1
lowercase = self.alphas_cumprod[t]
lowercase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowercase = 1 - alpha_prod_t
lowercase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowercase = self.betas[t]
else:
lowercase = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
lowercase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
lowercase = torch.log(torch.clamp(UpperCAmelCase_ , min=1E-20 ) )
lowercase = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
lowercase = variance.log()
lowercase = beta.log()
lowercase = (predicted_variance + 1) / 2
lowercase = frac * max_log + (1 - frac) * min_log
return variance
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase=None , _lowerCAmelCase = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
'''simple docstring'''
lowercase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
lowercase , lowercase = torch.split(UpperCAmelCase_ , sample.shape[1] , dim=1 )
else:
lowercase = None
# 1. compute alphas, betas
if prev_timestep is None:
lowercase = t - 1
lowercase = self.alphas_cumprod[t]
lowercase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowercase = 1 - alpha_prod_t
lowercase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowercase = self.betas[t]
lowercase = self.alphas[t]
else:
lowercase = 1 - alpha_prod_t / alpha_prod_t_prev
lowercase = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
""" for the UnCLIPScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase = torch.clamp(
UpperCAmelCase_ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
lowercase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowercase = 0
if t > 0:
lowercase = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase_ , device=model_output.device )
lowercase = self._get_variance(
UpperCAmelCase_ , predicted_variance=UpperCAmelCase_ , prev_timestep=UpperCAmelCase_ , )
if self.variance_type == "fixed_small_log":
lowercase = variance
elif self.variance_type == "learned_range":
lowercase = (0.5 * variance).exp()
else:
raise ValueError(
F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
""" for the UnCLIPScheduler.""" )
lowercase = variance * variance_noise
lowercase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase_ , pred_original_sample=UpperCAmelCase_ )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> torch.FloatTensor:
'''simple docstring'''
lowercase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
lowercase = timesteps.to(original_samples.device )
lowercase = alphas_cumprod[timesteps] ** 0.5
lowercase = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
lowercase = sqrt_alpha_prod.unsqueeze(-1 )
lowercase = (1 - alphas_cumprod[timesteps]) ** 0.5
lowercase = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
lowercase = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
lowercase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 702 |
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 653 | 0 |
import numpy as np


def runge_kutta(f, y0, x0, h, x_end):
    """Classic fourth-order Runge-Kutta integrator for y' = f(x, y)."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 |
import operator as op
UpperCamelCase = 'scaler.pt'
UpperCamelCase = 'pytorch_model'
UpperCamelCase = 'random_states'
UpperCamelCase = 'optimizer'
UpperCamelCase = 'scheduler'
UpperCamelCase = 'pytorch_model.bin'
UpperCamelCase = 'pytorch_model.bin.index.json'
UpperCamelCase = 'model.safetensors'
UpperCamelCase = 'model.safetensors.index.json'
UpperCamelCase = '1.10.2'
UpperCamelCase = 'py38'
UpperCamelCase = '4.17.0'
UpperCamelCase = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
UpperCamelCase = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
UpperCamelCase = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
UpperCamelCase = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
UpperCamelCase = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
UpperCamelCase = '2.0.1'
UpperCamelCase = ['pdsh', 'standard', 'openmpi', 'mvapich']
UpperCamelCase = ['default', 'reduce-overhead', 'max-autotune']
UpperCamelCase = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
UpperCamelCase = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
UpperCamelCase = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
UpperCamelCase = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
| 61 | 0 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for SHA-1 hashing."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate a 32-bit integer n by b bits.
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        # Pad the message to a multiple of 64 bytes, appending the bit length.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        # Split the padded data into 64-byte blocks.
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # Expand a 64-byte block into eighty 32-bit words.
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def lowerCamelCase__ ( ):
"""simple docstring"""
lowerCAmelCase_ = B"Test String"
assert SHAaHash(__lowerCAmelCase ).final_hash() == hashlib.shaa(__lowerCAmelCase ).hexdigest() # noqa: S324
def lowerCamelCase__ ( ):
"""simple docstring"""
lowerCAmelCase_ = argparse.ArgumentParser(description="Process some strings or files" )
parser.add_argument(
"--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument("--file" , dest="input_file" , help="Hash contents of a file" )
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb" ) as f:
lowerCAmelCase_ = f.read()
else:
lowerCAmelCase_ = bytes(__lowerCAmelCase , "utf-8" )
print(SHAaHash(__lowerCAmelCase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
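
# Hedged usage sketch (added for illustration): hash a short byte string and
# check it against hashlib's reference implementation.
if __name__ == "__main__":
    demo = b"hello"
    assert SHA1Hash(demo).final_hash() == hashlib.sha1(demo).hexdigest()  # noqa: S324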
| 711 |
def catalan_numbers(upper_limit: int) -> list:
    """simple docstring"""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
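
# Hedged check (added for illustration): the first six Catalan numbers.
if __name__ == "__main__":
    assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]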
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"""The Catalan numbers from 0 through {N} are:""")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")
| 279 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
    'tokenization_mvp': ['MvpTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mvp'] = [
        'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MvpForCausalLM',
        'MvpForConditionalGeneration',
        'MvpForQuestionAnswering',
        'MvpForSequenceClassification',
        'MvpModel',
        'MvpPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
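
# Illustration (not part of the original __init__): with the lazy module in
# place, users import symbols straight from the package, e.g.
#     from transformers import MvpTokenizer, MvpForConditionalGeneration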
| 248 |
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"""Could not make batched video from {videos}""")
# NOTE: the original class name was lost in this copy; "VideoImageProcessor"
# is a stand-in for this VideoMAE-style video image processor.
class VideoImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size['shortest_edge'], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size['height'], size['width'])
        else:
            raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        '''simple docstring'''
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        '''simple docstring'''
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        '''simple docstring'''
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        if not valid_images(videos):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]
        data = {'pixel_values': videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
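
# Hedged usage sketch (added for illustration): preprocess one 8-frame video of
# random pixels; `VideoImageProcessor` is the stand-in class name used above.
if __name__ == "__main__":
    frames = [np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8) for _ in range(8)]
    out = VideoImageProcessor()(frames, return_tensors="np")
    print(out["pixel_values"].shape)  # expected: (1, 8, 3, 224, 224)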
| 248 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 1_024,
"gpt2-medium": 1_024,
"gpt2-large": 1_024,
"gpt2-xl": 1_024,
"distilgpt2": 1_024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        self.add_bos_token = kwargs.pop("""add_bos_token""", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("""add_prefix_space""", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("""type"""))
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
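
# Hedged usage sketch (added for illustration; requires network access to
# fetch the pretrained files):
if __name__ == "__main__":
    tok = GPT2TokenizerFast.from_pretrained("gpt2")
    print(tok("Hello world")["input_ids"])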
| 548 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = """gelu"""

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["""input_ids"""]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["""attention_mask"""][:1, :]
        head_mask = inputs_dict["""head_mask"""]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1E-3)
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            """conversational""": TFPegasusForConditionalGeneration,
            """feature-extraction""": TFPegasusModel,
            """summarization""": TFPegasusForConditionalGeneration,
            """text2text-generation""": TFPegasusForConditionalGeneration,
            """translation""": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
    expected_text = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = """google/pegasus-xsum"""
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="""tf""")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
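
# Illustration (not part of the original test file): the slow integration test
# above is typically enabled with the RUN_SLOW environment flag, e.g.
#     RUN_SLOW=1 pytest -k test_batch_generation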
| 548 | 1 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''Creates a low-pass biquad filter.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''Creates a high-pass biquad filter.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''Creates a band-pass biquad filter.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''Creates an all-pass biquad filter.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''Creates a peak biquad filter.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''Creates a low-shelf biquad filter.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''Creates a high-shelf biquad filter.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
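
# Hedged usage sketch (added for illustration): IIRFilter.process() is assumed
# to consume one float sample at a time, as in audio_filters.iir_filter.
if __name__ == "__main__":
    lowpass = make_lowpass(1000, 48000)
    print([round(lowpass.process(sample), 4) for sample in (0.0, 1.0, 0.5, -0.5)])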
| 475 |
import math
import unittest
def is_prime(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class Test(unittest.TestCase):
    def test_primes(self):
        """simple docstring"""
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        """simple docstring"""
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0), "Zero doesn't have any positive factors, primes must have exactly two.", )
        self.assertFalse(
            is_prime(1), "One only has 1 positive factor, primes must have exactly two.", )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
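
# Hedged quick check (added for illustration) of the 6k ± 1 trial division.
if __name__ == "__main__":
    assert is_prime(97)
    assert not is_prime(91)  # 91 = 7 * 13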
if __name__ == "__main__":
    unittest.main()
| 475 | 1 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple:
    if a == 0:
        raise ValueError("""Coefficient 'a' must not be zero.""")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
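
# Hedged example (added for illustration): x^2 - 3x + 2 = 0 has roots 2 and 1.
if __name__ == "__main__":
    assert quadratic_roots(1, -3, 2) == (2.0, 1.0)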
def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
    main()
| 720 |
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
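
# Illustration (not part of the original module): preset schedules like these
# are passed to a diffusion pipeline's `timesteps` argument in place of a
# uniformly spaced grid, e.g. pipe(prompt, timesteps=fast27_timesteps).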
| 367 | 0 |
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
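
# Hedged check (added for illustration): the first two qualifying perimeters
# are 16 and 50, so the running sum up to 100 is 66.
if __name__ == "__main__":
    assert solution(100) == 66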
if __name__ == "__main__":
    print(f"{solution() = }")
| 684 |
"""simple docstring"""
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError('''No value was passed to the function''')

    is_negative = hex_num[0] == '''-'''
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError('''Invalid value was passed to the function''')

    bin_str = ''''''
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(('''-''' + bin_str) if is_negative else bin_str)
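
# Hedged examples (added for illustration):
if __name__ == "__main__":
    assert hex_to_bin("AC") == 10101100
    assert hex_to_bin("-fc") == -11111100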
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 564 | 0 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length, )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
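
# Hedged check (added for illustration): there are 120 reversible numbers
# below one thousand, so solution(3) should return 120.
if __name__ == "__main__":
    assert solution(3) == 120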
if __name__ == "__main__":
    print(f"{solution() = }")
| 308 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = """base_with_context"""
# NOTE: the left-hand-side module paths in the three loaders below were lost in
# this copy; they are reconstructed from the diffusers conversion script and
# should be double-checked against the target model definitions.
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding']))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding']), requires_grad=False)
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f'''layers_{lyr_num}''']
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale']))

        attention_weights = ly_weight['attention']
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale']))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale']))
    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding']), requires_grad=False)
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f'''layers_{lyr_num}''']
        attention_weights = ly_weight['attention']

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale']))

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale']))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale']))
    return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding']), requires_grad=False)
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T))
    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f'''layers_{lyr_num}''']
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale']))
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T))

        attention_weights = ly_weight['self_attention']
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T))

        attention_weights = ly_weight['MultiHeadDotProductAttention_0']
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale']))

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale']))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T))
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale']))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T))
    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)
    gin_overrides = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]
    gin_file = os.path.join(args.checkpoint_path, '..', 'config.gin')
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2', variance_type='fixed_large')
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='gated-gelu', )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length['targets_context'], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='gated-gelu', )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length['targets_context'], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
    notes_encoder = load_notes_encoder(t5_checkpoint['target']['token_encoder'], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint['target']['continuous_encoder'], continuous_encoder)
    decoder = load_decoder(t5_checkpoint['target']['decoder'], decoder)
    melgan = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder')
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path)
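
# Illustration (not part of the original script): typical invocation, assuming
# a downloaded t5x checkpoint directory:
#     python convert_music_spectrogram_to_diffusers.py \
#         --checkpoint_path base_with_context/checkpoint_500000 \
#         --output_path ./spectrogram_diffusion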
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
    parser.add_argument(
        """--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
    )
    parser.add_argument(
        """--checkpoint_path""",
        default=f"""{MODEL}/checkpoint_500000""",
        type=str,
        required=False,
        help="""Path to the original jax model checkpoint.""",
    )
    args = parser.parse_args()
    main(args)
| 248 | 1 |
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]

    return arr
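
# Hedged check (added for illustration): brick sort agrees with sorted().
if __name__ == "__main__":
    assert odd_even_transposition([3, 1, 2]) == [1, 2, 3]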
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 113 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."})
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."})
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, )
def _A ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase__: Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__: List[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_xnli" ,SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase__: int = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
UpperCAmelCase__: Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase__: Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
UpperCAmelCase__: Optional[int] = load_dataset(
"xnli" ,model_args.language ,split="train" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
UpperCAmelCase__: Optional[Any] = load_dataset(
"xnli" ,model_args.train_language ,split="train" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
UpperCAmelCase__: List[Any] = train_dataset.features["label"].names
if training_args.do_eval:
UpperCAmelCase__: List[Any] = load_dataset(
"xnli" ,model_args.language ,split="validation" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
UpperCAmelCase__: Tuple = eval_dataset.features["label"].names
if training_args.do_predict:
UpperCAmelCase__: Optional[Any] = load_dataset(
"xnli" ,model_args.language ,split="test" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
UpperCAmelCase__: List[Any] = predict_dataset.features["label"].names
# Labels
UpperCAmelCase__: Union[str, Any] = len(SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase__: Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=SCREAMING_SNAKE_CASE ,id2label={str(i ): label for i, label in enumerate(SCREAMING_SNAKE_CASE )} ,label2id={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE )} ,finetuning_task="xnli" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
UpperCAmelCase__: Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,do_lower_case=model_args.do_lower_case ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
UpperCAmelCase__: Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(".ckpt" in model_args.model_name_or_path ) ,config=SCREAMING_SNAKE_CASE ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
UpperCAmelCase__: Union[str, Any] = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
UpperCAmelCase__: Union[str, Any] = False
def preprocess_function(SCREAMING_SNAKE_CASE ):
# Tokenize the texts
return tokenizer(
examples["premise"] ,examples["hypothesis"] ,padding=SCREAMING_SNAKE_CASE ,max_length=data_args.max_seq_length ,truncation=SCREAMING_SNAKE_CASE ,)
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCAmelCase__: Optional[Any] = min(len(SCREAMING_SNAKE_CASE ) ,data_args.max_train_samples )
UpperCAmelCase__: Optional[Any] = train_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
UpperCAmelCase__: Dict = train_dataset.map(
SCREAMING_SNAKE_CASE ,batched=SCREAMING_SNAKE_CASE ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on train dataset" ,)
# Log a few random samples from the training set:
for index in random.sample(range(len(SCREAMING_SNAKE_CASE ) ) ,3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCAmelCase__: Optional[int] = min(len(SCREAMING_SNAKE_CASE ) ,data_args.max_eval_samples )
UpperCAmelCase__: List[Any] = eval_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
UpperCAmelCase__: Optional[Any] = eval_dataset.map(
SCREAMING_SNAKE_CASE ,batched=SCREAMING_SNAKE_CASE ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on validation dataset" ,)
if training_args.do_predict:
if data_args.max_predict_samples is not None:
UpperCAmelCase__: Dict = min(len(SCREAMING_SNAKE_CASE ) ,data_args.max_predict_samples )
UpperCAmelCase__: Optional[Any] = predict_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
UpperCAmelCase__: int = predict_dataset.map(
SCREAMING_SNAKE_CASE ,batched=SCREAMING_SNAKE_CASE ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on prediction dataset" ,)
# Get the metric function
UpperCAmelCase__: str = evaluate.load("xnli" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: str = p.predictions[0] if isinstance(p.predictions ,SCREAMING_SNAKE_CASE ) else p.predictions
UpperCAmelCase__: Union[str, Any] = np.argmax(SCREAMING_SNAKE_CASE ,axis=1 )
return metric.compute(predictions=SCREAMING_SNAKE_CASE ,references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
UpperCAmelCase__: Any = default_data_collator
elif training_args.fp16:
UpperCAmelCase__: Any = DataCollatorWithPadding(SCREAMING_SNAKE_CASE ,pad_to_multiple_of=8 )
else:
UpperCAmelCase__: Tuple = None
# Initialize our Trainer
UpperCAmelCase__: Dict = Trainer(
model=SCREAMING_SNAKE_CASE ,args=SCREAMING_SNAKE_CASE ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=SCREAMING_SNAKE_CASE ,tokenizer=SCREAMING_SNAKE_CASE ,data_collator=SCREAMING_SNAKE_CASE ,)
# Training
if training_args.do_train:
UpperCAmelCase__: Any = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase__: str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase__: Any = last_checkpoint
UpperCAmelCase__: Dict = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE )
UpperCAmelCase__: List[Any] = train_result.metrics
UpperCAmelCase__: Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE )
)
UpperCAmelCase__: Tuple = min(SCREAMING_SNAKE_CASE ,len(SCREAMING_SNAKE_CASE ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" ,SCREAMING_SNAKE_CASE )
trainer.save_metrics("train" ,SCREAMING_SNAKE_CASE )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCAmelCase__: Any = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE )
UpperCAmelCase__: Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE )
UpperCAmelCase__: int = min(SCREAMING_SNAKE_CASE ,len(SCREAMING_SNAKE_CASE ) )
trainer.log_metrics("eval" ,SCREAMING_SNAKE_CASE )
trainer.save_metrics("eval" ,SCREAMING_SNAKE_CASE )
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__: Union[str, Any] = trainer.predict(SCREAMING_SNAKE_CASE ,metric_key_prefix="predict" )
UpperCAmelCase__: Optional[Any] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(SCREAMING_SNAKE_CASE )
)
UpperCAmelCase__: Union[str, Any] = min(SCREAMING_SNAKE_CASE ,len(SCREAMING_SNAKE_CASE ) )
trainer.log_metrics("predict" ,SCREAMING_SNAKE_CASE )
trainer.save_metrics("predict" ,SCREAMING_SNAKE_CASE )
UpperCAmelCase__: Any = np.argmax(SCREAMING_SNAKE_CASE ,axis=1 )
UpperCAmelCase__: Optional[int] = os.path.join(training_args.output_dir ,"predictions.txt" )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE ,"w" ) as writer:
writer.write("index\tprediction\n" )
for index, item in enumerate(SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: int = label_list[item]
writer.write(f"{index}\t{item}\n" )
if __name__ == "__main__":
main() | 113 | 1 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor( SequenceFeatureExtractor ):
'''simple docstring'''
snake_case_ =["input_values", "padding_mask"]
def __init__(self ,feature_size = 1 ,sampling_rate = 2_40_00 ,padding_value = 0.0 ,chunk_length_s = None ,overlap = None ,**kwargs ,):
    super().__init__(feature_size=feature_size ,sampling_rate=sampling_rate ,padding_value=padding_value ,**kwargs )
    self.chunk_length_s = chunk_length_s
    self.overlap = overlap
@property
def chunk_length(self ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def chunk_stride(self ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
def __call__(self ,raw_audio ,padding = None ,truncation = False ,max_length = None ,return_tensors = None ,sampling_rate = None ,) -> BatchFeature:
    """Featurize one audio array, or a batch of arrays, into padded `input_values`."""
    if sampling_rate is not None:
        if sampling_rate != self.sampling_rate:
            raise ValueError(
                f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                f" {self.sampling_rate} and not {sampling_rate}." )
    else:
        logger.warning(
            "It is strongly recommended to pass the `sampling_rate` argument to this function. "
            "Failing to do so can result in silent errors that might be hard to debug." )
    if padding and truncation:
        raise ValueError("Both padding and truncation were set. Make sure you only set one." )
    elif padding is None:
        # by default let's pad the inputs
        padding = True
    is_batched = bool(
        isinstance(raw_audio ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )
    if is_batched:
        raw_audio = [np.asarray(audio ,dtype=np.float32 ).T for audio in raw_audio]
    elif not is_batched and not isinstance(raw_audio ,np.ndarray ):
        raw_audio = np.asarray(raw_audio ,dtype=np.float32 )
    elif isinstance(raw_audio ,np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
        raw_audio = raw_audio.astype(np.float32 )
    # always return batch
    if not is_batched:
        raw_audio = [np.asarray(raw_audio ).T]
    # verify inputs are valid
    for idx, example in enumerate(raw_audio ):
        if example.ndim > 2:
            raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}" )
        if self.feature_size == 1 and example.ndim != 1:
            raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels" )
        if self.feature_size == 2 and example.shape[-1] != 2:
            raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels" )
    padded_inputs = None
    input_values = BatchFeature({"input_values": raw_audio} )
    if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
        if truncation:
            # truncate to a whole number of strides that still fits the shortest array
            max_length = min(array.shape[0] for array in raw_audio )
            nb_step = int(np.floor(max_length / self.chunk_stride ) )
            max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
        elif padding:
            # pad up to a whole number of strides that covers the longest array
            max_length = max(array.shape[0] for array in raw_audio )
            nb_step = int(np.ceil(max_length / self.chunk_stride ) )
            max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            padding = "max_length"
        else:
            padded_inputs = input_values
    # normal padding on batch
    if padded_inputs is None:
        padded_inputs = self.pad(
            input_values ,max_length=max_length ,truncation=truncation ,padding=padding ,return_attention_mask=padding ,)
        if padding:
            padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask" )
    input_values = []
    for example in padded_inputs.pop("input_values" ):
        if self.feature_size == 1:
            example = example[..., None]
        input_values.append(example.T )
    padded_inputs["input_values"] = input_values
    if return_tensors is not None:
        padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
    return padded_inputs
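# Editor's worked example of the chunking arithmetic above (illustrative
# values): with chunk_length_s=1.0, sampling_rate=24_000 and overlap=0.01,
#   chunk_length = int(1.0 * 24_000) = 24_000 samples
#   chunk_stride = max(1, int((1.0 - 0.01) * 24_000)) = 23_760 samples
# and padding a batch whose longest array holds 50_000 samples gives
#   nb_step = ceil(50_000 / 23_760) = 3 -> max_length = (3 - 1) * 23_760 + 24_000 = 71_520.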
| 703 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    """Composite configuration holding one encoder config and one decoder config."""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
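# --- Editor's usage sketch (added; assumes the `transformers` package and the
# class name reconstructed above):
# from transformers import BertConfig
# enc, dec = BertConfig(), BertConfig()
# config = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
# assert config.decoder.is_decoder and config.decoder.add_cross_attention
# assert config.to_dict()["model_type"] == "encoder-decoder"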
| 90 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict ,v: str ,visited_forward: set ,visited_backward: set ,cst_fwd: dict ,cst_bwd: dict ,queue: PriorityQueue ,parent: dict ,shortest_distance: float | int ,) -> float | int:
    # Relax every edge leaving `v`; when the frontier touches a node already
    # settled by the opposite search, try to improve the best meeting distance.
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt ,np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str ,destination: str ,graph_forward: dict ,graph_backward: dict ) -> int:
    """
    Bi-directional Dijkstra: one search runs from the source and one from the
    destination; the loop stops once the settled frontiers can no longer
    improve the best meeting-point distance. (Function name reconstructed.)
    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )

        shortest_distance = pass_and_relaxation(
            graph_forward ,v_fwd ,visited_forward ,visited_backward ,cst_fwd ,cst_bwd ,queue_forward ,parent_forward ,shortest_distance ,)

        shortest_distance = pass_and_relaxation(
            graph_backward ,v_bwd ,visited_backward ,visited_forward ,cst_bwd ,cst_fwd ,queue_backward ,parent_backward ,shortest_distance ,)

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
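# Editor's addition: a direct call equivalent to the doctest above; the
# shortest E -> F route is E -> G -> F (2 + 1 = 3) in graph_fwd.
print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3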
| 283 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule ):
"""Applies polynomial warmup on top of a given learning-rate decay schedule."""
def __init__( self ,initial_learning_rate ,decay_schedule_fn ,warmup_steps ,power = 1.0 ,name = None ,):
    super().__init__()
    self.initial_learning_rate = initial_learning_rate
    self.warmup_steps = warmup_steps
    self.power = power
    self.decay_schedule_fn = decay_schedule_fn
    self.name = name
def __call__( self ,step ):
    with tf.name_scope(self.name or "WarmUp" ) as name:
        # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
        # learning rate will be `global_step/num_warmup_steps * init_lr`.
        global_step_float = tf.cast(step ,tf.float32 )
        warmup_steps_float = tf.cast(self.warmup_steps ,tf.float32 )
        warmup_percent_done = global_step_float / warmup_steps_float
        warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done ,self.power )
        return tf.cond(
            global_step_float < warmup_steps_float ,lambda: warmup_learning_rate ,lambda: self.decay_schedule_fn(step - self.warmup_steps ) ,name=name ,)
def get_config( self ):
    return {
        "initial_learning_rate": self.initial_learning_rate,
        "decay_schedule_fn": self.decay_schedule_fn,
        "warmup_steps": self.warmup_steps,
        "power": self.power,
        "name": self.name,
    }
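# Editor's sketch (added): the warmup rule above in plain Python, to make the
# schedule concrete; the function name is illustrative only.
def _warmup_lr_sketch(init_lr ,step ,warmup_steps ,power = 1.0 ):
    # lr ramps from 0 to init_lr over `warmup_steps`, polynomially in `power`
    return init_lr * (step / warmup_steps) ** power
assert abs(_warmup_lr_sketch(3e-5 ,50 ,100 ) - 1.5e-5 ) < 1e-12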
def create_optimizer( init_lr ,num_train_steps ,num_warmup_steps ,min_lr_ratio = 0.0 ,adam_beta1 = 0.9 ,adam_beta2 = 0.999 ,adam_epsilon = 1e-8 ,adam_clipnorm = None ,adam_global_clipnorm = None ,weight_decay_rate = 0.0 ,power = 1.0 ,include_in_weight_decay = None ,):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=power ,)
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr ,decay_schedule_fn=lr_schedule ,warmup_steps=num_warmup_steps ,)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule ,weight_decay_rate=weight_decay_rate ,beta_1=adam_beta1 ,beta_2=adam_beta2 ,epsilon=adam_epsilon ,clipnorm=adam_clipnorm ,global_clipnorm=adam_global_clipnorm ,exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] ,include_in_weight_decay=include_in_weight_decay ,)
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule ,beta_1=adam_beta1 ,beta_2=adam_beta2 ,epsilon=adam_epsilon ,clipnorm=adam_clipnorm ,global_clipnorm=adam_global_clipnorm ,)
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay( Adam ):
"""simple docstring"""
def __init__( self ,learning_rate = 0.001 ,beta_1 = 0.9 ,beta_2 = 0.999 ,epsilon = 1e-7 ,amsgrad = False ,weight_decay_rate = 0.0 ,include_in_weight_decay = None ,exclude_from_weight_decay = None ,name = "AdamWeightDecay" ,**kwargs ,):
    super().__init__(learning_rate ,beta_1 ,beta_2 ,epsilon ,amsgrad ,name ,**kwargs )
    self.weight_decay_rate = weight_decay_rate
    self._include_in_weight_decay = include_in_weight_decay
    self._exclude_from_weight_decay = exclude_from_weight_decay
@classmethod
def from_config( cls ,config ):
    """Creates an optimizer from its config with WarmUp custom object."""
    custom_objects = {"WarmUp": WarmUp}
    return super(AdamWeightDecay ,cls ).from_config(config ,custom_objects=custom_objects )
def _prepare_local( self ,var_device ,var_dtype ,apply_state ):
    super(AdamWeightDecay ,self )._prepare_local(var_device ,var_dtype ,apply_state )
    apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
        self.weight_decay_rate ,name="adam_weight_decay_rate" )
def _decay_weights_op( self ,var ,learning_rate ,apply_state ):
    do_decay = self._do_use_weight_decay(var.name )
    if do_decay:
        return var.assign_sub(
            learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] ,use_locking=self._use_locking ,)
    return tf.no_op()
def apply_gradients( self ,grads_and_vars ,name = None ,**kwargs ):
    grads, tvars = list(zip(*grads_and_vars ) )
    return super(AdamWeightDecay ,self ).apply_gradients(zip(grads ,tvars ) ,name=name ,**kwargs )
def _get_lr( self ,var_device ,var_dtype ,apply_state ):
    """Retrieves the learning rate with the given state."""
    if apply_state is None:
        return self._decayed_lr_t[var_dtype], {}
    apply_state = apply_state or {}
    coefficients = apply_state.get((var_device, var_dtype) )
    if coefficients is None:
        coefficients = self._fallback_apply_state(var_device ,var_dtype )
        apply_state[(var_device, var_dtype)] = coefficients
    return coefficients["lr_t"], {"apply_state": apply_state}
def _resource_apply_dense( self ,grad ,var ,apply_state = None ):
    lr_t, kwargs = self._get_lr(var.device ,var.dtype.base_dtype ,apply_state )
    decay = self._decay_weights_op(var ,lr_t ,apply_state )
    with tf.control_dependencies([decay] ):
        return super(AdamWeightDecay ,self )._resource_apply_dense(grad ,var ,**kwargs )
def _resource_apply_sparse( self ,grad ,var ,indices ,apply_state = None ):
    lr_t, kwargs = self._get_lr(var.device ,var.dtype.base_dtype ,apply_state )
    decay = self._decay_weights_op(var ,lr_t ,apply_state )
    with tf.control_dependencies([decay] ):
        return super(AdamWeightDecay ,self )._resource_apply_sparse(grad ,var ,indices ,**kwargs )
def get_config( self ):
    config = super().get_config()
    config.update({"weight_decay_rate": self.weight_decay_rate} )
    return config
def _do_use_weight_decay( self ,param_name ):
    """Whether to use L2 weight decay for `param_name`."""
    if self.weight_decay_rate == 0:
        return False
    if self._include_in_weight_decay:
        for r in self._include_in_weight_decay:
            if re.search(r ,param_name ) is not None:
                return True
    if self._exclude_from_weight_decay:
        for r in self._exclude_from_weight_decay:
            if re.search(r ,param_name ) is not None:
                return False
    return True
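# Editor's usage sketch (added; values are illustrative): build the optimizer
# with 10% linear warmup over a 1_000-step run.
# optimizer, lr_schedule = create_optimizer(
#     init_lr=3e-5, num_train_steps=1_000, num_warmup_steps=100, weight_decay_rate=0.01
# )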
class GradientAccumulator:
"""Accumulates gradients over several replica-local micro-batches (class name
reconstructed; this mirrors the TF gradient accumulator in transformers)."""
def __init__( self ):
    self._gradients = []
    self._accum_steps = None
@property
def step( self ):
    """Number of accumulated steps."""
    if self._accum_steps is None:
        self._accum_steps = tf.Variable(
            tf.constant(0 ,dtype=tf.int64 ) ,trainable=False ,synchronization=tf.VariableSynchronization.ON_READ ,aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA ,)
    return self._accum_steps.value()
@property
def gradients( self ):
    """The accumulated gradients on the current replica."""
    if not self._gradients:
        raise ValueError("The accumulator should be called first to initialize the gradients" )
    return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self ,gradients ):
    """Accumulates `gradients` on the current replica."""
    if not self._gradients:
        _ = self.step  # Create the step variable.
        self._gradients.extend(
            [
                tf.Variable(
                    tf.zeros_like(gradient ) ,trainable=False ,synchronization=tf.VariableSynchronization.ON_READ ,aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA ,)
                if gradient is not None
                else gradient
                for gradient in gradients
            ] )
    if len(gradients ) != len(self._gradients ):
        raise ValueError(f"Expected {len(self._gradients )} gradients, but got {len(gradients )}" )
    for accum_gradient, gradient in zip(self._gradients ,gradients ):
        if accum_gradient is not None and gradient is not None:
            accum_gradient.assign_add(gradient )
    self._accum_steps.assign_add(1 )
def reset( self ):
    """Resets the accumulated gradients on the current replica."""
    if not self._gradients:
        return
    self._accum_steps.assign(0 )
    for gradient in self._gradients:
        if gradient is not None:
            gradient.assign(tf.zeros_like(gradient ) )
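# Editor's usage sketch (added): accumulate per-micro-batch gradients, then
# apply them once and reset.
# accumulator = GradientAccumulator()
# for grads in micro_batch_gradients:   # e.g. from tf.GradientTape
#     accumulator(grads)
# optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
# accumulator.reset()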
| 283 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest( unittest.TestCase ):
def setUp( self ):
    self.tmpdirname = tempfile.mkdtemp()
    image_processor = BlipImageProcessor()
    tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
    processor = Blip2Processor(image_processor ,tokenizer )
    processor.save_pretrained(self.tmpdirname )
def get_tokenizer( self ,**kwargs ):
    return AutoProcessor.from_pretrained(self.tmpdirname ,**kwargs ).tokenizer
def get_image_processor( self ,**kwargs ):
    return AutoProcessor.from_pretrained(self.tmpdirname ,**kwargs ).image_processor
def tearDown( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Tuple = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_lowerCamelCase : Any = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def test_save_load_pretrained_additional_features( self ):
"""simple docstring"""
processor = Blip2Processor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
image_processor_add_kwargs = self.get_image_processor(do_normalize=False ,padding_value=1.0 )
processor = Blip2Processor.from_pretrained(
self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=False ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def test_image_processor( self ):
"""simple docstring"""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Blip2Processor(tokenizer=tokenizer ,image_processor=image_processor )
_lowerCamelCase : Dict = self.prepare_image_inputs()
_lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''np''' )
_lowerCamelCase : List[Any] = processor(images=__lowerCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def test_tokenizer( self ):
"""simple docstring"""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Blip2Processor(tokenizer=tokenizer ,image_processor=image_processor )
_lowerCamelCase : int = '''lower newer'''
_lowerCamelCase : Optional[Any] = processor(text=__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer(__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def test_processor( self ):
"""simple docstring"""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Blip2Processor(tokenizer=tokenizer ,image_processor=image_processor )
_lowerCamelCase : Union[str, Any] = '''lower newer'''
_lowerCamelCase : Any = self.prepare_image_inputs()
_lowerCamelCase : List[str] = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def test_tokenizer_decode( self ):
"""simple docstring"""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Blip2Processor(tokenizer=tokenizer ,image_processor=image_processor )
_lowerCamelCase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : List[Any] = processor.batch_decode(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def test_model_input_names( self ):
"""simple docstring"""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Blip2Processor(tokenizer=tokenizer ,image_processor=image_processor )
_lowerCamelCase : Tuple = '''lower newer'''
_lowerCamelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCamelCase : Any = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 598 |
"""simple docstring"""
def catalan( number: int ) -> int:
    """
    Iteratively computes a Catalan number via the recurrence
    C(n) = C(n - 1) * (4n - 2) // (n + 1); catalan(k) is the (k - 1)-th
    Catalan number in the usual 0-indexed convention (C_0 = 1).
    >>> catalan(5)
    14
    """
    if not isinstance(number ,int ):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg )
    current_number = 1
    for i in range(1 ,number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
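# Editor's cross-check (added; assumes Python >= 3.8 for math.comb): the loop
# above matches the closed form C_n = comb(2n, n) // (n + 1).
import math
assert catalan(5 ) == math.comb(8 ,4 ) // 5 == 14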
if __name__ == "__main__":
import doctest
doctest.testmod()
| 598 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
"""simple docstring"""
def __init__( self : Optional[int] , __A : int , __A : str=1_3 , __A : Optional[int]=1_0 , __A : int=3 , __A : int=2 , __A : Tuple=2 , __A : str=2 , __A : str=True , __A : List[str]=True , __A : Optional[int]=3_2 , __A : Any=5 , __A : List[str]=4 , __A : Any=3_7 , __A : List[str]="gelu" , __A : List[Any]=0.1 , __A : Optional[Any]=0.1 , __A : Union[str, Any]=1_0 , __A : Optional[Any]=0.02 , __A : Union[str, Any]=0.9 , __A : Any=None , ):
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = num_channels
__UpperCamelCase = patch_size
__UpperCamelCase = tubelet_size
__UpperCamelCase = num_frames
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = mask_ratio
__UpperCamelCase = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
__UpperCamelCase = (image_size // patch_size) ** 2
__UpperCamelCase = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
__UpperCamelCase = int(mask_ratio * self.seq_length )
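# Editor's note (added): with the defaults above (image_size=10, patch_size=2,
# num_frames=2, tubelet_size=2, mask_ratio=0.9) the arithmetic gives
# num_patches_per_frame = (10 // 2) ** 2 = 25, seq_length = (2 // 2) * 25 = 25,
# and num_masks = int(0.9 * 25) = 22 masked tokens per video.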
def prepare_config_and_inputs( self ):
    pixel_values = floats_tensor(
        [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
    labels = None
    if self.use_labels:
        labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
    config = self.get_config()
    return config, pixel_values, labels
def get_config( self ):
    return VideoMAEConfig(
        image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,tubelet_size=self.tubelet_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=False ,initializer_range=self.initializer_range ,)
def create_and_check_model( self ,config ,pixel_values ,labels ):
    model = VideoMAEModel(config=config )
    model.to(torch_device )
    model.eval()
    result = model(pixel_values )
    self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_pretraining( self ,config ,pixel_values ,labels ):
    model = VideoMAEForPreTraining(config )
    model.to(torch_device )
    model.eval()
    # important: each video needs to have the same number of masked patches
    # hence we define a single mask, which we then repeat for each example in the batch
    mask = torch.ones((self.num_masks,) )
    mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
    bool_masked_pos = mask.expand(self.batch_size , -1 ).bool()
    result = model(pixel_values ,bool_masked_pos )
    # model only returns predictions for masked patches
    num_masked_patches = mask.sum().item()
    decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
    self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def prepare_config_and_inputs_for_common( self ):
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels = config_and_inputs
    inputs_dict = {'pixel_values': pixel_values}
    return config, inputs_dict
@require_torch
class VideoMAEModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
pipeline_model_mapping = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
# the four flag names below are reconstructed from the upstream test file
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp( self ):
    self.model_tester = VideoMAEModelTester(self )
    self.config_tester = ConfigTester(self ,config_class=VideoMAEConfig ,has_text_modality=False ,hidden_size=3_7 )
def _prepare_for_class( self ,inputs_dict ,model_class ,return_labels=False ):
    inputs_dict = copy.deepcopy(inputs_dict )
    if model_class == VideoMAEForPreTraining:
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.model_tester.num_masks,) )
        mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
        bool_masked_pos = mask.expand(self.model_tester.batch_size , -1 ).bool()
        inputs_dict['bool_masked_pos'] = bool_masked_pos.to(torch_device )
    if return_labels:
        if model_class in [
            *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ),
        ]:
            inputs_dict['labels'] = torch.zeros(
                self.model_tester.batch_size ,dtype=torch.long ,device=torch_device )
    return inputs_dict
def _lowerCamelCase ( self : Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def _lowerCamelCase ( self : Tuple ):
pass
def _lowerCamelCase ( self : Tuple ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , nn.Linear ) )
def _lowerCamelCase ( self : List[Any] ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__A )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __A )
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _lowerCamelCase ( self : Union[str, Any] ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
@slow
def _lowerCamelCase ( self : List[Any] ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = VideoMAEModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def _lowerCamelCase ( self : List[str] ):
if not self.has_attentions:
pass
else:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
__UpperCamelCase = self.model_tester.seq_length - self.model_tester.num_masks
__UpperCamelCase = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(__A , __A ) )
__UpperCamelCase = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCamelCase = True
__UpperCamelCase = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(__A , __A ) )
__UpperCamelCase = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__UpperCamelCase = len(__A )
# Check attention is always last and order is fine
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(__A , __A ) )
self.assertEqual(out_len + 1 , len(__A ) )
__UpperCamelCase = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _lowerCamelCase ( self : Any ):
def check_hidden_states_output(__A : Union[str, Any] , __A : Any , __A : Optional[int] ):
__UpperCamelCase = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(__A , __A ) )
__UpperCamelCase = outputs.hidden_states
__UpperCamelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__A ) , __A )
__UpperCamelCase = self.model_tester.seq_length - self.model_tester.num_masks
__UpperCamelCase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase = True
check_hidden_states_output(__A , __A , __A )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _lowerCamelCase ( self : Union[str, Any] ):
pass
def prepare_video():
    """Loads a short sample video from the Hub as a list of frames."""
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' ,filename='eating_spaghetti.npy' ,repo_type='dataset' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class VideoMAEModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCamelCase ( self : Union[str, Any] ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
__A )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_video()
__UpperCamelCase = image_processor(__A , return_tensors='pt' ).to(__A )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__A )
# verify the logits
__UpperCamelCase = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , __A )
__UpperCamelCase = torch.tensor([0.3669, -0.0688, -0.2421] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4 ) )
@slow
def _lowerCamelCase ( self : List[Any] ):
__UpperCamelCase = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(__A )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_video()
__UpperCamelCase = image_processor(__A , return_tensors='pt' ).to(__A )
# add boolean mask, indicating which patches to mask
__UpperCamelCase = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
__UpperCamelCase = torch.load(__A )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__A )
# verify the logits
__UpperCamelCase = torch.Size([1, 1_4_0_8, 1_5_3_6] )
__UpperCamelCase = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=__A )
self.assertEqual(outputs.logits.shape , __A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __A , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
__UpperCamelCase = torch.tensor([0.5142] , device=__A )
self.assertTrue(torch.allclose(outputs.loss , __A , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
__UpperCamelCase = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=__A ).to(
__A )
with torch.no_grad():
__UpperCamelCase = model(**__A )
__UpperCamelCase = torch.tensor(torch.tensor([0.6469] ) , device=__A )
self.assertTrue(torch.allclose(outputs.loss , __A , atol=1e-4 ) )
| 399 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_rembert"] = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_rembert"] = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 399 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9_500,
        num_object_labels=1_600,
        num_attr_labels=400,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2_048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
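# Editor's usage sketch (added): the per-modality depths land in the
# `num_hidden_layers` mapping built at the end of __init__ above.
if __name__ == "__main__":
    cfg = LxmertConfig(l_layers=2, x_layers=1, r_layers=2)
    assert cfg.num_hidden_layers == {"vision": 2, "cross_encoder": 1, "language": 2}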
| 393 |
"""simple docstring"""
import pytest
SCREAMING_SNAKE_CASE__ = "__dummy_dataset1__"
SCREAMING_SNAKE_CASE__ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
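# Editor's illustrative test (added): the fixture above returns a directory
# containing `<script_name>.py`, the layout `datasets.load_dataset(path)` expects.
def test_dataset_loading_script_dir_layout(dataset_loading_script_dir, dataset_loading_script_name):
    import os

    script = os.path.join(dataset_loading_script_dir, f"{dataset_loading_script_name}.py")
    assert os.path.isfile(script)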
| 393 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset( Dataset ):
features: List[InputFeatures]
def __init__( self :Tuple , _lowerCamelCase :str , _lowerCamelCase :PreTrainedTokenizer , _lowerCamelCase :str , _lowerCamelCase :Optional[int] = None , _lowerCamelCase :int=False , _lowerCamelCase :bool = False , ):
'''simple docstring'''
UpperCamelCase_ : Tuple =hans_processors[task]()
UpperCamelCase_ : List[Any] =os.path.join(
__lowerCamelCase , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(__lowerCamelCase ) , __lowerCamelCase , ) , )
UpperCamelCase_ : Tuple =processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCamelCase_ : Optional[Any] =cached_features_file + '.lock'
with FileLock(__lowerCamelCase ):
if os.path.exists(__lowerCamelCase ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
UpperCamelCase_ : Optional[int] =torch.load(__lowerCamelCase )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
UpperCamelCase_ : Optional[Any] =(
processor.get_dev_examples(__lowerCamelCase ) if evaluate else processor.get_train_examples(__lowerCamelCase )
)
logger.info('Training examples: %s' , len(__lowerCamelCase ) )
UpperCamelCase_ : Tuple =hans_convert_examples_to_features(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
logger.info('Saving features into cached file %s' , __lowerCamelCase )
torch.save(self.features , __lowerCamelCase )
def __len__( self :List[str] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self :List[str] , _lowerCamelCase :str ):
'''simple docstring'''
return self.features[i]
def lowerCamelCase_ ( self :Any ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10_000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
| 357 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
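# Minimal composition sketch (an added illustration, not upstream code): the
# two sub-configs default-construct, and from_text_vision_configs stitches
# them into the top-level config. The tiny layer counts are arbitrary.
def _example_compose_pix2struct_config():
    text_config = Pix2StructTextConfig(num_layers=2, num_heads=2)
    vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2)
    return Pix2StructConfig.from_text_vision_configs(text_config, vision_config)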
| 377 | 0 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
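# Worked example (an added illustration): with 10 shards and at most 3 jobs,
# _distribute_shards hands out contiguous ranges whose sizes differ by at most
# one, exactly as in the parametrized case above.
def _example_distribute_shards():
    out = _distribute_shards(num_shards=10, max_num_jobs=3)
    assert out == [range(0, 4), range(4, 7), range(7, 10)]
    return out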
| 422 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
] )
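# Hedged wiring sketch (an addition, not part of the original script): how the
# pieces above are typically combined into a DataLoader. The tokenizer
# checkpoint, jsonl path, and batch size are illustrative assumptions.
def _example_build_mmimdb_loader(jsonl_path="train.jsonl"):
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = JsonlDataset(jsonl_path, tokenizer, get_image_transforms(), get_mmimdb_labels(), max_seq_length=512)
    return DataLoader(dataset, batch_size=8, collate_fn=collate_fn)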
| 422 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step, comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}", force_update=True
        )
        self.training_tracker = None
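# Usage sketch (an added illustration, not part of the module): Trainer picks
# this callback up automatically inside notebooks, but it can also be passed
# explicitly. The model, dataset, and args names are placeholders.
def _example_use_notebook_callback(model, train_dataset, training_args):
    from transformers import Trainer

    return Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        callbacks=[NotebookProgressCallback()],
    )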
| 89 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 714 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
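# Hedged usage sketch (an addition, not part of the test suite): the
# round-trip the tests above exercise, outside of unittest.
def _example_bloom_roundtrip():
    tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
    ids = tokenizer.encode("The quick brown fox")
    return tokenizer.decode(ids)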
| 399 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = ["MaskFormerFeatureExtractor"]
UpperCAmelCase_ : str = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
]
    _import_structure["modeling_maskformer_swin"] = [
"MaskFormerSwinBackbone",
"MaskFormerSwinModel",
"MaskFormerSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 120 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """
    Pure python implementation of the iterative Heap's algorithm,
    returning all permutations of a list.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
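# Worked example (an added illustration): Heap's algorithm returns all n!
# orderings, so a 3-element list yields 6 tuples covering every permutation.
def _example_heaps():
    perms = heaps([1, 2, 3])
    assert len(perms) == 6
    assert set(perms) == {(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)}
    return perms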
| 199 | 0 |
def factorial(num: int) -> int:
    """Find the factorial of the given number num."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of number and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number num!"""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
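# Worked check (an added illustration): 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) is 27; the default num=100
# gives 648.
def _example_solution():
    assert solution(10) == 27
    return solution()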
| 387 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        """Test all possible segments."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
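    # Worked example (an added illustration): after the updates above, a
    # full-range query must match a plain reduce over the mutated test_array.
    assert sum_segment_tree.query(0, len(test_array) - 1) == reduce(lambda a, b: a + b, test_array)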
| 387 | 1 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """
    Cleans the table of content of the model documentation by removing duplicates and sorting models alphabetically.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
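# Worked example (an added illustration, not part of the utility): duplicate
# `local` keys sharing one title collapse to a single, alphabetically sorted
# entry.
def _example_clean_model_doc_toc():
    toc = [
        {"local": "model_doc/bert", "title": "BERT"},
        {"local": "model_doc/albert", "title": "ALBERT"},
        {"local": "model_doc/bert", "title": "BERT"},
    ]
    cleaned = clean_model_doc_toc(toc)
    assert cleaned == [
        {"local": "model_doc/albert", "title": "ALBERT"},
        {"local": "model_doc/bert", "title": "BERT"},
    ]
    return cleaned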
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__UpperCamelCase : int = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 4 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """First method: linear regression."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Second method: SARIMAX."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Third method: support vector regressor."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """Optional helper: lower limit derived from the interquartile range."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Compare the votes with the actual result to decide whether today's data is safe."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
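    # Worked check (an added illustration, not upstream code): votes within 0.1
    # of the actual value count as safe, larger votes count as unsafe.
    assert data_safety_checker([0.5, 0.52, 2.0], 0.55) is True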
| 4 | 1 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
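# Hedged usage sketch (an addition, not part of the module): listing the files
# of a dataset repo through the legacy filesystem. The repo id is an
# illustrative assumption.
def _example_ls_dataset_repo():
    from huggingface_hub import HfApi

    repo_info = HfApi().dataset_info("squad")
    fs = HfFileSystem(repo_info=repo_info)
    return fs.ls("")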
| 701 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))

        # This will offload each submodule's weights while keeping the model usable
        hook_kwargs = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.linear2.weight.device, torch.device('meta'))
        # Buffers are not included in the offload by default, so they stay on the execution device
        device = torch.device(hook_kwargs['execution_device'])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))

        # Now test with buffers included in the offload
        hook_kwargs = {
            'execution_device': 0 if torch.cuda.is_available() else 'cpu',
            'offload': True,
            'offload_buffers': True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.linear2.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device('meta'))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))
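        # With offload=True the hook keeps the real weights on CPU, leaves
        # meta-device placeholders on the module, and streams the weights to the
        # execution device at each forward; removing the hook restores the CPU
        # copies, as the assertions above confirm.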
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))

        # This will offload every submodule in one call
        execution_device = 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.linear2.weight.device, torch.device('meta'))
        # Buffers are not included in the offload by default, so they stay on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.linear2.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device('meta'))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))
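        # Unlike add_hook_to_module, attach_align_device_hook recurses over the
        # submodules itself, so one call offloads the whole model and a single
        # remove_hook_from_submodules call undoes it.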
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))

        # This will offload each submodule, fetching weights from the provided weights map
        execution_device = 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict())

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.linear2.weight.device, torch.device('meta'))
        # Buffers are not included in the offload by default, so they stay on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.linear2.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device('meta'))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))
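        # weights_map=model.state_dict() tells the hook where to fetch the
        # offloaded tensors from; in real use this could just as well be a
        # disk-backed mapping rather than an in-memory state dict.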
| 273 | 0 |
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 if both inputs are 1, else 0."""
    return int((input_1, input_2).count(0) == 0)
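# For inputs restricted to 0 and 1 this is exactly logical AND: the tuple
# contains no zero precisely when both inputs are 1, so and_gate(a, b) == a & b.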
def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 81 | """Sum all the primes below two million (Project Euler problem 10)."""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
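# Why 6k +/- 1 suffices: writing any integer as 6k + r with r in 0..5, the
# cases 6k, 6k + 2, 6k + 4 are even and 6k + 3 is divisible by 3, so every
# prime above 3 leaves remainder 1 or 5, i.e. has the form 6k - 1 or 6k + 1.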
def prime_generator() -> Iterator[int]:
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
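# takewhile pulls primes from the infinite generator and stops at the first
# prime >= n, so the sum covers exactly the primes below n.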
if __name__ == "__main__":
    print(f"{solution() = }")
| 363 | 0 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
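    # In the dynamic-axes mappings above, the integer keys are tensor dimensions
    # and the string values are symbolic axis names the ONNX exporter keeps
    # variable (batch size, sequence length) instead of baking in fixed sizes.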
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
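    # Reading of the code above: each past_key_values entry is a tuple of
    # (decoder key, decoder value, encoder key, encoder value) tensors shaped
    # (batch, num_heads, seq_len, head_dim); zero tensors suffice because only
    # the shapes matter when tracing the export.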
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
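    # compute_effective_axis_dimension replaces the dynamic-axis marker (-1)
    # with a small concrete size (2 samples / 8 tokens, per the comments above)
    # so tracing runs on real tensors while the exported axes stay dynamic.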
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            ) | 713 |
def least_divisible_repunit(divisor: int) -> int:
    """Return the digit count of the smallest repunit (1, 11, 111, ...) divisible by divisor, or 0 if none exists."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
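# Worked example: the repunit remainders mod 7 run 1, 4, 6, 5, 2, 0, so
# least_divisible_repunit(7) == 6: the six-digit repunit 111111 equals 7 * 15873.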
def solution(limit: int = 1_000_000) -> int:
    """Return the least n for which the smallest repunit divisible by n has more than `limit` digits (Project Euler 129)."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }") | 182 | 0 |