| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 86 to 54.5k) | int64 (0 to 371) | string (length 87 to 49.2k) | int64 (0 to 349) | int64 (0 to 1) |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
'''simple docstring'''
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError("""The length of profit and weight must be same.""" )
if max_weight <= 0:
raise ValueError("""max_weight must greater than zero.""" )
if any(p < 0 for p in profit ):
raise ValueError("""Profit can not be negative.""" )
if any(w < 0 for w in weight ):
raise ValueError("""Weight can not be negative.""" )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
lowercase_ = [p / w for p, w in zip(__lowerCAmelCase , __lowerCAmelCase )]
# Creating a copy of the list and sorting profit/weight in ascending order
lowercase_ = sorted(__lowerCAmelCase )
# declaring useful variables
lowercase_ = len(__lowerCAmelCase )
lowercase_ = 0
lowercase_ = 0
lowercase_ = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
lowercase_ = sorted_profit_by_weight[length - i - 1]
lowercase_ = profit_by_weight.index(__lowerCAmelCase )
lowercase_ = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
"Input profits, weights, and then max_weight (all positive ints) separated by "
"spaces."
)
UpperCAmelCase : Any = [int(x) for x in input("Input profits separated by spaces: ").split()]
UpperCAmelCase : Optional[Any] = [int(x) for x in input("Input weights separated by spaces: ").split()]
UpperCAmelCase : List[str] = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
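Two illustrative calls (inputs chosen here, not taken from the original file) exercise both branches of the loop: taking a whole item and taking only a fraction of one.

# Illustrative checks with assumed inputs (not part of the original module).
assert calc_profit([10, 20, 30], [3, 4, 5], 15) == 60  # everything fits whole
assert calc_profit([10, 20, 30], [3, 4, 5], 6) == 35   # 30 + (1/4) * 20 from the weight-4 item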
"""simple docstring"""
from collections.abc import Sequence
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase = False ) -> float:
'''simple docstring'''
if not arr:
return 0
lowercase_ = 0 if allow_empty_subarrays else float("""-inf""" )
lowercase_ = 0.0
for num in arr:
lowercase_ = max(0 if allow_empty_subarrays else num , curr_sum + num )
lowercase_ = max(__lowerCAmelCase , __lowerCAmelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase : Union[str, Any] = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
UpperCAmelCase : Dict = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Tuple = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Tuple = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
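Because of the lazy structure above, the heavy torch and sentencepiece backends are imported only when one of the exported names is actually accessed. A minimal sketch of downstream usage follows; the checkpoint id is an assumption for illustration, not taken from this file.

# Sketch of downstream usage; the checkpoint name is assumed for illustration.
from transformers import SpeechT5ForTextToSpeech, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")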
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
UpperCAmelCase : Optional[int] = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None):
"""simple docstring"""
lowercase_ = self.layer[current_layer](lowerCAmelCase_ , lowerCAmelCase_ , head_mask[current_layer])
lowercase_ = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , __UpperCAmelCase , )
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : Any , lowerCAmelCase_ : Dict):
"""simple docstring"""
super().__init__(lowerCAmelCase_)
lowercase_ = BertEncoderWithPabee(lowerCAmelCase_)
self.init_weights()
lowercase_ = 0
lowercase_ = 0
lowercase_ = 0
lowercase_ = 0
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = threshold
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = patience
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = 0
lowercase_ = 0
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.inference_layers_num / self.inference_instances_num
lowercase_ = (
F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(lowerCAmelCase_)
@add_start_docstrings_to_model_forward(lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""")
elif input_ids is not None:
lowercase_ = input_ids.size()
elif inputs_embeds is not None:
lowercase_ = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""")
lowercase_ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
if token_type_ids is None:
lowercase_ = torch.zeros(lowerCAmelCase_ , dtype=torch.long , device=lowerCAmelCase_)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowercase_ = self.get_extended_attention_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowercase_ , lowercase_ , lowercase_ = encoder_hidden_states.size()
lowercase_ = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
lowercase_ = self.invert_attention_mask(lowerCAmelCase_)
else:
lowercase_ = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowercase_ = self.get_head_mask(lowerCAmelCase_ , self.config.num_hidden_layers)
lowercase_ = self.embeddings(
input_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_)
lowercase_ = embedding_output
if self.training:
lowercase_ = []
for i in range(self.config.num_hidden_layers):
lowercase_ = self.encoder.adaptive_forward(
lowerCAmelCase_ , current_layer=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_)
lowercase_ = self.pooler(lowerCAmelCase_)
lowercase_ = output_layers[i](output_dropout(lowerCAmelCase_))
res.append(lowerCAmelCase_)
elif self.patience == 0: # Use all layers for inference
lowercase_ = self.encoder(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
lowercase_ = self.pooler(encoder_outputs[0])
lowercase_ = [output_layers[self.config.num_hidden_layers - 1](lowerCAmelCase_)]
else:
lowercase_ = 0
lowercase_ = None
lowercase_ = 0
for i in range(self.config.num_hidden_layers):
calculated_layer_num += 1
lowercase_ = self.encoder.adaptive_forward(
lowerCAmelCase_ , current_layer=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_)
lowercase_ = self.pooler(lowerCAmelCase_)
lowercase_ = output_layers[i](lowerCAmelCase_)
if regression:
lowercase_ = logits.detach()
if patient_result is not None:
lowercase_ = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
patient_counter += 1
else:
lowercase_ = 0
else:
lowercase_ = logits.detach().argmax(dim=1)
if patient_result is not None:
lowercase_ = patient_result.detach().argmax(dim=1)
if (patient_result is not None) and torch.all(labels.eq(lowerCAmelCase_)):
patient_counter += 1
else:
lowercase_ = 0
lowercase_ = logits
if patient_counter == self.patience:
break
lowercase_ = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , __UpperCAmelCase , )
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , lowerCAmelCase_ : str):
"""simple docstring"""
super().__init__(lowerCAmelCase_)
lowercase_ = config.num_labels
lowercase_ = BertModelWithPabee(lowerCAmelCase_)
lowercase_ = nn.Dropout(config.hidden_dropout_prob)
lowercase_ = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels) for _ in range(config.num_hidden_layers)])
self.init_weights()
@add_start_docstrings_to_model_forward(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : List[str]=None , ):
"""simple docstring"""
lowercase_ = self.bert(
input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowercase_ = (logits[-1],)
if labels is not None:
lowercase_ = None
lowercase_ = 0
for ix, logits_item in enumerate(lowerCAmelCase_):
if self.num_labels == 1:
# We are doing regression
lowercase_ = MSELoss()
lowercase_ = loss_fct(logits_item.view(-1) , labels.view(-1))
else:
lowercase_ = CrossEntropyLoss()
lowercase_ = loss_fct(logits_item.view(-1 , self.num_labels) , labels.view(-1))
if total_loss is None:
lowercase_ = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowercase_ = (total_loss / total_weights,) + outputs
return outputs
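A minimal sketch of how the patience mechanism is exercised at inference time; the tiny config and random input ids below are assumptions for illustration, and the research project's GLUE runner does the real wiring.

# Sketch only: small config and random input ids are assumed for illustration.
from transformers import BertConfig

config = BertConfig(hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=64, num_labels=2)
model = BertForSequenceClassificationWithPabee(config)
model.eval()
model.bert.set_patience(2)  # stop as soon as 2 consecutive internal classifiers agree
with torch.no_grad():
    logits = model(input_ids=torch.randint(0, config.vocab_size, (1, 8)))[0]
model.bert.log_stats()  # reports the average number of layers actually used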
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = torch.nn.Linear(1_0 , 1_0)
lowercase_ = torch.optim.SGD(model.parameters() , 0.1)
lowercase_ = Accelerator()
lowercase_ = accelerator.prepare(lowerCAmelCase_)
try:
pickle.loads(pickle.dumps(lowerCAmelCase_))
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''')
AcceleratorState._reset_state()
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Any:
'''simple docstring'''
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x20000 and cp <= 0x2a6df) #
or (cp >= 0x2a700 and cp <= 0x2b73f) #
or (cp >= 0x2b740 and cp <= 0x2b81f) #
or (cp >= 0x2b820 and cp <= 0x2ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2f800 and cp <= 0x2fa1f) #
): #
return True
return False
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[str]:
'''simple docstring'''
for char in word:
lowercase_ = ord(__lowerCAmelCase )
if not _is_chinese_char(__lowerCAmelCase ):
return 0
return 1
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = set()
for token in tokens:
lowercase_ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase )
if chinese_word:
word_set.add(__lowerCAmelCase )
lowercase_ = list(__lowerCAmelCase )
return word_list
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
lowercase_ = max([len(__lowerCAmelCase ) for w in chinese_word_set] )
lowercase_ = bert_tokens
lowercase_ , lowercase_ = 0, len(__lowerCAmelCase )
while start < end:
lowercase_ = True
if is_chinese(bert_word[start] ):
lowercase_ = min(end - start , __lowerCAmelCase )
for i in range(__lowerCAmelCase , 1 , -1 ):
lowercase_ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowercase_ = """##""" + bert_word[j]
lowercase_ = start + i
lowercase_ = False
break
if single_word:
start += 1
return bert_word
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = []
for i in range(0 , len(__lowerCAmelCase ) , 1_00 ):
lowercase_ = ltp_tokenizer.seg(lines[i : i + 1_00] )[0]
lowercase_ = [get_chinese_word(__lowerCAmelCase ) for r in res]
ltp_res.extend(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowercase_ = []
for i in range(0 , len(__lowerCAmelCase ) , 1_00 ):
lowercase_ = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=5_12 )
bert_res.extend(res["""input_ids"""] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowercase_ = []
for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ):
lowercase_ = []
for id in input_ids:
lowercase_ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase )
input_tokens.append(__lowerCAmelCase )
lowercase_ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__lowerCAmelCase ):
if token[:2] == "##":
lowercase_ = token[2:]
# save chinese tokens' pos
if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ):
ref_id.append(__lowerCAmelCase )
ref_ids.append(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
return ref_ids
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
lowercase_ = f.readlines()
lowercase_ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowercase_ = LTP(args.ltp ) # faster in GPU device
lowercase_ = BertTokenizer.from_pretrained(args.bert )
lowercase_ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
lowercase_ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids]
f.writelines(__lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
UpperCAmelCase : int = parser.parse_args()
main(args)
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict=1_3 , lowerCAmelCase_ : Optional[int]=3_2 , lowerCAmelCase_ : Optional[Any]=3 , lowerCAmelCase_ : List[str]=4 , lowerCAmelCase_ : Dict=[1_0, 2_0, 3_0, 4_0] , lowerCAmelCase_ : int=[2, 2, 3, 2] , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Tuple=3_7 , lowerCAmelCase_ : Optional[int]="gelu" , lowerCAmelCase_ : Tuple=1_0 , lowerCAmelCase_ : Tuple=0.02 , lowerCAmelCase_ : int=["stage2", "stage3", "stage4"] , lowerCAmelCase_ : Any=[2, 3, 4] , lowerCAmelCase_ : List[str]=None , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = num_channels
lowercase_ = num_stages
lowercase_ = hidden_sizes
lowercase_ = depths
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = num_labels
lowercase_ = initializer_range
lowercase_ = out_features
lowercase_ = out_indices
lowercase_ = scope
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.num_labels)
lowercase_ = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = ConvNextVaModel(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = ConvNextVaForImageClassification(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple):
"""simple docstring"""
lowercase_ = ConvNextVaBackbone(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
lowercase_ = None
lowercase_ = ConvNextVaBackbone(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowercase__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase__ = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = ConvNextVaModelTester(self)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=3_7)
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""")
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""")
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""")
def _UpperCAmelCase ( self : str):
"""simple docstring"""
pass
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase_ = True
if model_class.__name__ in [
*get_values(lowerCAmelCase_),
*get_values(lowerCAmelCase_),
]:
continue
lowercase_ = model_class(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.train()
lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_)
lowercase_ = model(**lowerCAmelCase_).loss
loss.backward()
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase_ = False
lowercase_ = True
if (
model_class.__name__
in [*get_values(lowerCAmelCase_), *get_values(lowerCAmelCase_)]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase_ = model_class(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.gradient_checkpointing_enable()
model.train()
lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_)
lowercase_ = model(**lowerCAmelCase_).loss
loss.backward()
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]):
lowercase_ = model_class(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_))
lowercase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_) , expected_num_stages + 1)
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_)
@slow
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = ConvNextVaModel.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
def _SCREAMING_SNAKE_CASE () -> Union[str, Any]:
'''simple docstring'''
lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""") if is_vision_available() else None
@slow
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""").to(lowerCAmelCase_)
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = preprocessor(images=lowerCAmelCase_ , return_tensors="""pt""").to(lowerCAmelCase_)
# forward pass
with torch.no_grad():
lowercase_ = model(**lowerCAmelCase_)
# verify the logits
lowercase_ = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , lowerCAmelCase_)
lowercase_ = torch.tensor([0.9_996, 0.1_966, -0.4_386]).to(lowerCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4))
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
lowercase_ = int(__lowerCAmelCase )
lowercase_ = int(__lowerCAmelCase )
lowercase_ = []
for temp in range(int(__lowerCAmelCase ) ):
series.append(F'''1 / {pow(temp + 1 , int(__lowerCAmelCase ) )}''' if series else """1""" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase : List[str] = int(input("Enter the last number (nth term) of the P-Series"))
UpperCAmelCase : Tuple = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
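For example (inputs chosen here for illustration):

# Illustrative check with assumed inputs (not part of the original module).
assert p_series(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]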
"""simple docstring"""
from collections.abc import Sequence
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase = False ) -> float:
'''simple docstring'''
if not arr:
return 0
lowercase_ = 0 if allow_empty_subarrays else float("""-inf""" )
lowercase_ = 0.0
for num in arr:
lowercase_ = max(0 if allow_empty_subarrays else num , curr_sum + num )
lowercase_ = max(__lowerCAmelCase , __lowerCAmelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase : Union[str, Any] = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
self.assertEqual(x.component(0) , 1)
self.assertEqual(x.component(2) , 3)
lowercase_ = Vector()
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = Vector([0, 0, 0, 0, 0, 1])
self.assertEqual(str(lowerCAmelCase_) , """(0,0,0,0,0,1)""")
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3, 4])
self.assertEqual(len(lowerCAmelCase_) , 4)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = Vector([1, 2])
lowercase_ = Vector([1, 2, 3, 4, 5])
lowercase_ = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
lowercase_ = Vector([1, -1, 1, -1, 2, -3, 4, -5])
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3)
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3)
self.assertEqual(z.euclidean_length() , 0)
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 1, 1])
self.assertEqual((x + y).component(0) , 2)
self.assertEqual((x + y).component(1) , 3)
self.assertEqual((x + y).component(2) , 4)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 1, 1])
self.assertEqual((x - y).component(0) , 0)
self.assertEqual((x - y).component(1) , 1)
self.assertEqual((x - y).component(2) , 2)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([2, -1, 4]) # for test of dot product
lowercase_ = Vector([1, -2, -1])
self.assertEqual(str(x * 3.0) , """(3.0,6.0,9.0)""")
self.assertEqual((a * b) , 0)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
self.assertEqual(str(zero_vector(1_0)).count("""0""") , 1_0)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1)) , """(0,1,0)""")
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 0, 1])
self.assertEqual(str(axpy(2 , lowerCAmelCase_ , lowerCAmelCase_)) , """(3,4,7)""")
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 0, 0, 0, 0, 0])
lowercase_ = x.copy()
self.assertEqual(str(lowerCAmelCase_) , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Vector([1, 0, 0])
x.change_component(0 , 0)
x.change_component(1 , 1)
self.assertEqual(str(lowerCAmelCase_) , """(0,1,0)""")
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = [[-3, -1_4, -1_0], [-5, -1_0, -5], [-2, -1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(minors[x][y] , a.minor(lowerCAmelCase_ , lowerCAmelCase_))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = [[-3, 1_4, -1_0], [5, -1_0, 5], [-2, 1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(cofactors[x][y] , a.cofactor(lowerCAmelCase_ , lowerCAmelCase_))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual(-5 , a.determinant())
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3)
lowercase_ = Vector([1, 2, 3])
self.assertEqual("""(14,32,50)""" , str(a * x))
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
a.change_component(0 , 2 , 5)
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual(7 , a.component(2 , 1) , 0.01)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3)
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b))
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3)
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b))
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5)) , )
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
UpperCAmelCase : Optional[int] = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None):
"""simple docstring"""
lowercase_ = self.layer[current_layer](lowerCAmelCase_ , lowerCAmelCase_ , head_mask[current_layer])
lowercase_ = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , __UpperCAmelCase , )
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : Any , lowerCAmelCase_ : Dict):
"""simple docstring"""
super().__init__(lowerCAmelCase_)
lowercase_ = BertEncoderWithPabee(lowerCAmelCase_)
self.init_weights()
lowercase_ = 0
lowercase_ = 0
lowercase_ = 0
lowercase_ = 0
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = threshold
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = patience
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = 0
lowercase_ = 0
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.inference_layers_num / self.inference_instances_num
lowercase_ = (
F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(lowerCAmelCase_)
@add_start_docstrings_to_model_forward(lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""")
elif input_ids is not None:
lowercase_ = input_ids.size()
elif inputs_embeds is not None:
lowercase_ = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""")
lowercase_ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
if token_type_ids is None:
lowercase_ = torch.zeros(lowerCAmelCase_ , dtype=torch.long , device=lowerCAmelCase_)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowercase_ = self.get_extended_attention_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowercase_ , lowercase_ , lowercase_ = encoder_hidden_states.size()
lowercase_ = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
lowercase_ = self.invert_attention_mask(lowerCAmelCase_)
else:
lowercase_ = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowercase_ = self.get_head_mask(lowerCAmelCase_ , self.config.num_hidden_layers)
lowercase_ = self.embeddings(
input_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_)
lowercase_ = embedding_output
if self.training:
lowercase_ = []
for i in range(self.config.num_hidden_layers):
lowercase_ = self.encoder.adaptive_forward(
lowerCAmelCase_ , current_layer=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_)
lowercase_ = self.pooler(lowerCAmelCase_)
lowercase_ = output_layers[i](output_dropout(lowerCAmelCase_))
res.append(lowerCAmelCase_)
elif self.patience == 0: # Use all layers for inference
lowercase_ = self.encoder(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
lowercase_ = self.pooler(encoder_outputs[0])
lowercase_ = [output_layers[self.config.num_hidden_layers - 1](lowerCAmelCase_)]
else:
lowercase_ = 0
lowercase_ = None
lowercase_ = 0
for i in range(self.config.num_hidden_layers):
calculated_layer_num += 1
lowercase_ = self.encoder.adaptive_forward(
lowerCAmelCase_ , current_layer=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_)
lowercase_ = self.pooler(lowerCAmelCase_)
lowercase_ = output_layers[i](lowerCAmelCase_)
if regression:
lowercase_ = logits.detach()
if patient_result is not None:
lowercase_ = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
patient_counter += 1
else:
lowercase_ = 0
else:
lowercase_ = logits.detach().argmax(dim=1)
if patient_result is not None:
lowercase_ = patient_result.detach().argmax(dim=1)
if (patient_result is not None) and torch.all(labels.eq(lowerCAmelCase_)):
patient_counter += 1
else:
lowercase_ = 0
lowercase_ = logits
if patient_counter == self.patience:
break
lowercase_ = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , __UpperCAmelCase , )
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , lowerCAmelCase_ : str):
"""simple docstring"""
super().__init__(lowerCAmelCase_)
lowercase_ = config.num_labels
lowercase_ = BertModelWithPabee(lowerCAmelCase_)
lowercase_ = nn.Dropout(config.hidden_dropout_prob)
lowercase_ = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels) for _ in range(config.num_hidden_layers)])
self.init_weights()
@add_start_docstrings_to_model_forward(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : List[str]=None , ):
"""simple docstring"""
lowercase_ = self.bert(
input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowercase_ = (logits[-1],)
if labels is not None:
lowercase_ = None
lowercase_ = 0
for ix, logits_item in enumerate(lowerCAmelCase_):
if self.num_labels == 1:
# We are doing regression
lowercase_ = MSELoss()
lowercase_ = loss_fct(logits_item.view(-1) , labels.view(-1))
else:
lowercase_ = CrossEntropyLoss()
lowercase_ = loss_fct(logits_item.view(-1 , self.num_labels) , labels.view(-1))
if total_loss is None:
lowercase_ = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowercase_ = (total_loss / total_weights,) + outputs
return outputs
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = 0
if start < end:
lowercase_ = randint(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = a[end]
lowercase_ = a[pivot]
lowercase_ = temp
lowercase_ , lowercase_ = _in_place_partition(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
count += _in_place_quick_sort(__lowerCAmelCase , __lowerCAmelCase , p - 1 )
count += _in_place_quick_sort(__lowerCAmelCase , p + 1 , __lowerCAmelCase )
return count
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = 0
lowercase_ = randint(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = a[end]
lowercase_ = a[pivot]
lowercase_ = temp
lowercase_ = start - 1
for index in range(__lowerCAmelCase , __lowerCAmelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowercase_ = new_pivot_index + 1
lowercase_ = a[new_pivot_index]
lowercase_ = a[index]
lowercase_ = temp
lowercase_ = a[new_pivot_index + 1]
lowercase_ = a[end]
lowercase_ = temp
return new_pivot_index + 1, count
UpperCAmelCase : Union[str, Any] = TemporaryFile()
UpperCAmelCase : Optional[int] = 100 # 1000 elements are to be sorted
UpperCAmelCase , UpperCAmelCase : List[str] = 0, 1 # mean and standard deviation
UpperCAmelCase : Optional[Any] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
UpperCAmelCase : List[str] = np.load(outfile)
UpperCAmelCase : List[Any] = len(M) - 1
UpperCAmelCase : Optional[int] = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
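Because the pivot is chosen at random, the comparison count varies from run to run while the sorted order does not. A tiny check, with an input list assumed here for illustration:

# Illustrative check with an assumed input list (not part of the original script).
sample = [5, 3, 8, 1, 2]
comparisons = _in_place_quick_sort(sample, 0, len(sample) - 1)
assert sample == [1, 2, 3, 5, 8]
print(f"sorted in-place with {comparisons} comparisons")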
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase="ro" , __lowerCAmelCase="en" , __lowerCAmelCase="wmt16" , __lowerCAmelCase=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
lowercase_ = F'''{src_lang}-{tgt_lang}'''
print(F'''Converting {dataset}-{pair}''' )
lowercase_ = datasets.load_dataset(__lowerCAmelCase , __lowerCAmelCase )
if save_dir is None:
lowercase_ = F'''{dataset}-{pair}'''
lowercase_ = Path(__lowerCAmelCase )
save_dir.mkdir(exist_ok=__lowerCAmelCase )
for split in ds.keys():
print(F'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
lowercase_ = """val""" if split == """validation""" else split
lowercase_ = save_dir.joinpath(F'''{fn}.source''' )
lowercase_ = save_dir.joinpath(F'''{fn}.target''' )
lowercase_ = src_path.open("""w+""" )
lowercase_ = tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
lowercase_ = x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(F'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
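The function can also be called directly instead of through the fire command line entry point; the arguments below are assumed for illustration.

# Assumed direct invocation for illustration (equivalent to the fire CLI call):
# downloads WMT16 ro-en and writes {train,val,test}.source / .target files.
download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir="wmt16-ro-en")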
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=False ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ) -> Any:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowercase_ = """"""
else:
lowercase_ = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' )
lowercase_ = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase_ = in_proj_weight[
: config.hidden_size, :
]
lowercase_ = in_proj_bias[: config.hidden_size]
lowercase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ = in_proj_bias[-config.hidden_size :]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = dct.pop(__lowerCAmelCase )
lowercase_ = val
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = ViTMSNConfig()
lowercase_ = 10_00
lowercase_ = """datasets/huggingface/label-files"""
lowercase_ = """imagenet-1k-id2label.json"""
lowercase_ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase ) , """r""" ) )
lowercase_ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
lowercase_ = idalabel
lowercase_ = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowercase_ = 3_84
lowercase_ = 15_36
lowercase_ = 6
elif "l16" in checkpoint_url:
lowercase_ = 10_24
lowercase_ = 40_96
lowercase_ = 24
lowercase_ = 16
lowercase_ = 0.1
elif "b4" in checkpoint_url:
lowercase_ = 4
elif "l7" in checkpoint_url:
lowercase_ = 7
lowercase_ = 10_24
lowercase_ = 40_96
lowercase_ = 24
lowercase_ = 16
lowercase_ = 0.1
lowercase_ = ViTMSNModel(__lowerCAmelCase )
lowercase_ = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="""cpu""" )["""target_encoder"""]
lowercase_ = ViTImageProcessor(size=config.image_size )
remove_projection_head(__lowerCAmelCase )
lowercase_ = create_rename_keys(__lowerCAmelCase , base_model=__lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , base_model=__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
lowercase_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase_ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
lowercase_ = ViTImageProcessor(
size=config.image_size , image_mean=__lowerCAmelCase , image_std=__lowerCAmelCase )
lowercase_ = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
lowercase_ = model(**__lowerCAmelCase )
lowercase_ = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase_ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
lowercase_ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
lowercase_ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
lowercase_ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
lowercase_ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __lowerCAmelCase , atol=1E-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase : Tuple = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
UpperCAmelCase : Any = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
UpperCAmelCase : List[str] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
UpperCAmelCase : str = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence"""),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence""") , id="""references"""),
}) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int=4 , lowerCAmelCase_ : List[str]=False):
"""simple docstring"""
lowercase_ = compute_bleu(
reference_corpus=lowerCAmelCase_ , translation_corpus=lowerCAmelCase_ , max_order=lowerCAmelCase_ , smooth=lowerCAmelCase_)
((lowercase_) , (lowercase_) , (lowercase_) , (lowercase_) , (lowercase_) , (lowercase_)) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
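# ---------------------------------------------------------------------------
# Illustration (not part of the metric above): how the returned fields fit
# together. This is a sketch of the standard BLEU definition with uniform
# n-gram weights; the actual `compute_bleu` also supports smoothing.
# ---------------------------------------------------------------------------
import math


def combine_bleu_fields(precisions, brevity_penalty):
    """BLEU = brevity penalty * geometric mean of the n-gram precisions."""
    if min(precisions) == 0:
        return 0.0
    geo_mean = math.exp(sum(math.log(p) for p in precisions) / len(precisions))
    return brevity_penalty * geo_mean


# Perfect 1..4-gram precisions with no length penalty give a BLEU of 1.0,
# matching the doctest in the usage description above.
assert combine_bleu_fields([1.0, 1.0, 1.0, 1.0], 1.0) == 1.0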
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "perceiver"
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str]=2_5_6 , lowerCAmelCase_ : Dict=1_2_8_0 , lowerCAmelCase_ : List[Any]=7_6_8 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : List[Any]=2_6 , lowerCAmelCase_ : Optional[Any]=8 , lowerCAmelCase_ : Tuple=8 , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Optional[Any]="kv" , lowerCAmelCase_ : Dict=1 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : List[Any]=1E-12 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=2_6_2 , lowerCAmelCase_ : Union[str, Any]=2_0_4_8 , lowerCAmelCase_ : Any=5_6 , lowerCAmelCase_ : int=[3_6_8, 4_9_6] , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : Dict=1_9_2_0 , lowerCAmelCase_ : Optional[Any]=1_6 , lowerCAmelCase_ : Tuple=[1, 1_6, 2_2_4, 2_2_4] , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
lowercase_ = num_latents
lowercase_ = d_latents
lowercase_ = d_model
lowercase_ = num_blocks
lowercase_ = num_self_attends_per_block
lowercase_ = num_self_attention_heads
lowercase_ = num_cross_attention_heads
lowercase_ = qk_channels
lowercase_ = v_channels
lowercase_ = cross_attention_shape_for_attention
lowercase_ = self_attention_widening_factor
lowercase_ = cross_attention_widening_factor
lowercase_ = hidden_act
lowercase_ = attention_probs_dropout_prob
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = use_query_residual
# masked language modeling attributes
lowercase_ = vocab_size
lowercase_ = max_position_embeddings
# image classification attributes
lowercase_ = image_size
# flow attributes
lowercase_ = train_size
# multimodal autoencoding attributes
lowercase_ = num_frames
lowercase_ = audio_samples_per_frame
lowercase_ = samples_per_patch
lowercase_ = output_shape
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
@property
def _UpperCAmelCase ( self : str):
"""simple docstring"""
if self.task == "multiple-choice":
lowercase_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
])
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return 1E-4
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[TensorType] = None , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 4_0 , lowerCAmelCase_ : int = 4_0 , ):
"""simple docstring"""
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase_ = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase_ = preprocessor.num_special_tokens_to_add(lowerCAmelCase_)
lowercase_ = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase_)
# Generate dummy inputs according to compute batch and sequence
lowercase_ = [""" """.join(["""a"""]) * seq_length] * batch_size
lowercase_ = dict(preprocessor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_))
lowercase_ = inputs.pop("""input_ids""")
return inputs
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase_ = compute_effective_axis_dimension(lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch)
lowercase_ = self._generate_dummy_images(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = dict(preprocessor(images=lowerCAmelCase_ , return_tensors=lowerCAmelCase_))
lowercase_ = inputs.pop("""pixel_values""")
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""")
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "imagegpt"
lowercase__ = ["past_key_values"]
lowercase__ = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[Any] , lowerCAmelCase_ : Optional[int]=5_1_2 + 1 , lowerCAmelCase_ : Dict=3_2 * 3_2 , lowerCAmelCase_ : List[str]=5_1_2 , lowerCAmelCase_ : Optional[Any]=2_4 , lowerCAmelCase_ : int=8 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : int="quick_gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Tuple=1E-5 , lowerCAmelCase_ : Any=0.02 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : str=False , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
lowercase_ = vocab_size
lowercase_ = n_positions
lowercase_ = n_embd
lowercase_ = n_layer
lowercase_ = n_head
lowercase_ = n_inner
lowercase_ = activation_function
lowercase_ = resid_pdrop
lowercase_ = embd_pdrop
lowercase_ = attn_pdrop
lowercase_ = layer_norm_epsilon
lowercase_ = initializer_range
lowercase_ = scale_attn_weights
lowercase_ = use_cache
lowercase_ = scale_attn_by_inverse_layer_idx
lowercase_ = reorder_and_upcast_attn
lowercase_ = tie_word_embeddings
super().__init__(tie_word_embeddings=lowerCAmelCase_ , **lowerCAmelCase_)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
@property
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
])
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : "FeatureExtractionMixin" , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional["TensorType"] = None , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3_2 , lowerCAmelCase_ : int = 3_2 , ):
"""simple docstring"""
lowercase_ = self._generate_dummy_images(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = dict(preprocessor(images=lowerCAmelCase_ , return_tensors=lowerCAmelCase_))
return inputs
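# ---------------------------------------------------------------------------
# Note on the export inputs (a sketch, not part of the original file): the
# ImageGPT preprocessor maps pixels to color-cluster indices, so the dummy
# images generated above come back from the preprocessor under "input_ids" --
# exactly the axis name declared in the `inputs` property. Upstream these
# classes correspond to `ImageGPTConfig` / `ImageGPTOnnxConfig`.
# ---------------------------------------------------------------------------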
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = BarthezTokenizer
lowercase__ = BarthezTokenizerFast
lowercase__ = True
lowercase__ = True
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
super().setUp()
lowercase_ = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""")
tokenizer.save_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCAmelCase_)
lowercase_ = tokenizer
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = """<pad>"""
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_) , lowerCAmelCase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_) , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(vocab_keys[-1] , """<mask>""")
self.assertEqual(len(lowerCAmelCase_) , 1_0_1_1_2_2)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2)
@require_torch
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowercase_ = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
lowercase_ = self.tokenizer(
lowerCAmelCase_ , max_length=len(lowerCAmelCase_) , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="""pt""")
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
self.assertEqual((2, 6) , batch.input_ids.shape)
self.assertEqual((2, 6) , batch.attention_mask.shape)
lowercase_ = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = """I was born in 92000, and this is falsé."""
lowercase_ = tokenizer.tokenize(lowerCAmelCase_)
lowercase_ = rust_tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
lowercase_ = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(lowerCAmelCase_)
lowercase_ = rust_tokenizer.encode(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
@slow
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
lowercase_ = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=lowerCAmelCase_ , )
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
UpperCAmelCase : Any = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "sequence-classification"
def __init__( self : List[Any] , lowerCAmelCase_ : List[str]):
"""simple docstring"""
if type(lowerCAmelCase_) == dict:
lowercase_ = Namespace(**lowerCAmelCase_)
lowercase_ = glue_output_modes[hparams.task]
lowercase_ = glue_tasks_num_labels[hparams.task]
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ , self.mode)
def _UpperCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : List[str]):
"""simple docstring"""
return self.model(**lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase_ = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowercase_ = self(**lowerCAmelCase_)
lowercase_ = outputs[0]
lowercase_ = self.trainer.lr_schedulers[0]["""scheduler"""]
lowercase_ = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = self.hparams
lowercase_ = processors[args.task]()
lowercase_ = processor.get_labels()
for mode in ["train", "dev"]:
lowercase_ = self._feature_file(lowerCAmelCase_)
if os.path.exists(lowerCAmelCase_) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , lowerCAmelCase_)
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir)
lowercase_ = (
processor.get_dev_examples(args.data_dir)
if mode == """dev"""
else processor.get_train_examples(args.data_dir)
)
lowercase_ = convert_examples_to_features(
lowerCAmelCase_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , lowerCAmelCase_)
torch.save(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : bool = False):
"""simple docstring"""
lowercase_ = """dev""" if mode == """test""" else mode
lowercase_ = self._feature_file(lowerCAmelCase_)
logger.info("""Loading features from cached file %s""" , lowerCAmelCase_)
lowercase_ = torch.load(lowerCAmelCase_)
lowercase_ = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
lowercase_ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
lowercase_ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
if self.hparams.glue_output_mode == "classification":
lowercase_ = torch.tensor([f.label for f in features] , dtype=torch.long)
elif self.hparams.glue_output_mode == "regression":
lowercase_ = torch.tensor([f.label for f in features] , dtype=torch.float)
return DataLoader(
TensorDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) , batch_size=lowerCAmelCase_ , shuffle=lowerCAmelCase_ , )
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple):
"""simple docstring"""
lowercase_ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase_ = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowercase_ = self(**lowerCAmelCase_)
lowercase_ , lowercase_ = outputs[:2]
lowercase_ = logits.detach().cpu().numpy()
lowercase_ = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item()
lowercase_ = np.concatenate([x["""pred"""] for x in outputs] , axis=0)
if self.hparams.glue_output_mode == "classification":
lowercase_ = np.argmax(lowerCAmelCase_ , axis=1)
elif self.hparams.glue_output_mode == "regression":
lowercase_ = np.squeeze(lowerCAmelCase_)
lowercase_ = np.concatenate([x["""target"""] for x in outputs] , axis=0)
lowercase_ = [[] for _ in range(out_label_ids.shape[0])]
lowercase_ = [[] for _ in range(out_label_ids.shape[0])]
lowercase_ = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , lowerCAmelCase_ , lowerCAmelCase_)}
lowercase_ = dict(results.items())
lowercase_ = results
return ret, preds_list, out_label_list
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : list):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ = self._eval_end(lowerCAmelCase_)
lowercase_ = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ = self._eval_end(lowerCAmelCase_)
lowercase_ = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any):
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowerCAmelCase_ , lowerCAmelCase_)
parser.add_argument(
"""--max_seq_length""" , default=1_2_8 , type=lowerCAmelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=lowerCAmelCase_ , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""")
return parser
def _SCREAMING_SNAKE_CASE () -> List[Any]:
'''simple docstring'''
lowercase_ = argparse.ArgumentParser()
add_generic_args(__lowerCAmelCase , os.getcwd() )
lowercase_ = GLUETransformer.add_model_specific_args(__lowerCAmelCase , os.getcwd() )
lowercase_ = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowercase_ = os.path.join(
"""./results""" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
lowercase_ = GLUETransformer(__lowerCAmelCase )
lowercase_ = generic_train(__lowerCAmelCase , __lowerCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowercase_ = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=__lowerCAmelCase ) )
lowercase_ = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__lowerCAmelCase )
if __name__ == "__main__":
main()
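# ---------------------------------------------------------------------------
# Example invocation (a sketch; most flags are registered by
# `add_generic_args` and `BaseTransformer.add_model_specific_args` in
# lightning_base, so exact names may differ in your checkout):
#
#   python run_glue.py \
#     --task mrpc \
#     --model_name_or_path bert-base-cased \
#     --data_dir $GLUE_DIR/MRPC \
#     --max_seq_length 128 \
#     --gpus 1 \
#     --do_predict \
#     --output_dir ./results/mrpc
# ---------------------------------------------------------------------------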
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Optional[Any] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class SCREAMING_SNAKE_CASE__ :
lowercase__ = PegasusConfig
lowercase__ = {}
lowercase__ = "gelu"
def __init__( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any]=1_3 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : str=9_9 , lowerCAmelCase_ : Tuple=3_2 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Dict=3_7 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Optional[int]=2_0 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : Optional[Any]=0 , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = eos_token_id
lowercase_ = pad_token_id
lowercase_ = bos_token_id
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size)
lowercase_ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1)
lowercase_ = np.concatenate([input_ids, eos_tensor] , axis=1)
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowercase_ = prepare_pegasus_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
return config, inputs_dict
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""")
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Optional[Any]:
'''simple docstring'''
if attention_mask is None:
lowercase_ = np.not_equal(__lowerCAmelCase , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
lowercase_ = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowercase__ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowercase__ = True
lowercase__ = False
lowercase__ = False
lowercase__ = False
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = FlaxPegasusModelTester(self)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = model_class(lowerCAmelCase_)
@jax.jit
def encode_jitted(lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int]=None , **lowerCAmelCase_ : Optional[int]):
return model.encode(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
with self.subTest("""JIT Enabled"""):
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""])
lowercase_ = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict):
return model.decode(
decoder_input_ids=lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , encoder_outputs=lowerCAmelCase_ , )
with self.subTest("""JIT Enabled"""):
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase_ = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowerCAmelCase_)
lowercase_ = np.ones((1, 1))
lowercase_ = model(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
@slow
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""")
lowercase_ = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""")
lowercase_ = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
lowercase_ = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
lowercase_ = tokenizer(lowerCAmelCase_ , return_tensors="""np""" , truncation=lowerCAmelCase_ , max_length=5_1_2 , padding=lowerCAmelCase_)
lowercase_ = model.generate(**lowerCAmelCase_ , num_beams=2).sequences
lowercase_ = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_)
assert tgt_text == decoded
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Tuple = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
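# ---------------------------------------------------------------------------
# Note (a sketch, not part of the original file): `_LazyModule` swaps itself
# into `sys.modules`, so the heavy torch-backed symbols listed above are only
# imported on first attribute access, e.g.:
#
#   from transformers import LlamaConfig        # light-weight, config only
#   from transformers import LlamaForCausalLM   # triggers the torch import
# ---------------------------------------------------------------------------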
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = None
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = True
lowercase__ = None
lowercase__ = 1
lowercase__ = None
lowercase__ = False
lowercase__ = None
lowercase__ = None
def _UpperCAmelCase ( self : int):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
"""simple docstring"""
from itertools import permutations
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> bool:
'''simple docstring'''
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
lowercase_ = [7, 11, 13, 17]
for i, test in enumerate(__lowerCAmelCase ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
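# Worked example (assuming the function above keeps its original name
# `is_substring_divisible`, as used in the solution function below):
# 1406357289 is one of the qualifying 0-9 pandigitals from Project Euler 43,
# since
#   406 % 2 == 0, (0 + 6 + 3) % 3 == 0, 635 % 5 == 0, 357 % 7 == 0,
#   572 % 11 == 0, 728 % 13 == 0, 289 % 17 == 0,
# so is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9)) returns True.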
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = 10 ) -> int:
'''simple docstring'''
return sum(
int("""""".join(map(__lowerCAmelCase , __lowerCAmelCase ) ) )
for num in permutations(range(__lowerCAmelCase ) )
if is_substring_divisible(__lowerCAmelCase ) )
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase : Any = logging.get_logger(__name__)
UpperCAmelCase : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase : List[Any] = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
UpperCAmelCase : Union[str, Any] = {
"allenai/led-base-16384": 1_6384,
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = LEDTokenizer
lowercase__ = ["input_ids", "attention_mask"]
def __init__( self : Dict , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]="replace" , lowerCAmelCase_ : Dict="<s>" , lowerCAmelCase_ : Union[str, Any]="</s>" , lowerCAmelCase_ : List[Any]="</s>" , lowerCAmelCase_ : Optional[Any]="<s>" , lowerCAmelCase_ : Union[str, Any]="<unk>" , lowerCAmelCase_ : List[str]="<pad>" , lowerCAmelCase_ : Dict="<mask>" , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]=True , **lowerCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(
lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space:
lowercase_ = getattr(lowerCAmelCase_ , pre_tok_state.pop("""type"""))
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**lowerCAmelCase_)
lowercase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase_ = """post_processor"""
lowercase_ = getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_)
if tokenizer_component_instance:
lowercase_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ = tuple(state["""sep"""])
if "cls" in state:
lowercase_ = tuple(state["""cls"""])
lowercase_ = False
if state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space:
lowercase_ = add_prefix_space
lowercase_ = True
if state.get("""trim_offsets""" , lowerCAmelCase_) != trim_offsets:
lowercase_ = trim_offsets
lowercase_ = True
if changes_to_apply:
lowercase_ = getattr(lowerCAmelCase_ , state.pop("""type"""))
lowercase_ = component_class(**lowerCAmelCase_)
setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_)
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""")
return None
return str(self._mask_token)
@mask_token.setter
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else value
lowercase_ = value
def _UpperCAmelCase ( self : Dict , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = kwargs.get("""is_split_into_words""" , lowerCAmelCase_)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""")
return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Any):
"""simple docstring"""
lowercase_ = kwargs.get("""is_split_into_words""" , lowerCAmelCase_)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""")
return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_)
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None):
"""simple docstring"""
lowercase_ = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_)
return tuple(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]=None):
"""simple docstring"""
lowercase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , ):
"""simple docstring"""
lowercase_ = super()._pad(
encoded_inputs=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding_strategy=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
# Load from model defaults
if return_attention_mask is None:
lowercase_ = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase_ = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowercase_ = len(encoded_inputs["""global_attention_mask"""]) != len(lowerCAmelCase_)
if needs_to_be_padded:
lowercase_ = len(lowerCAmelCase_) - len(encoded_inputs["""global_attention_mask"""])
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase_ = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
lowercase_ = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side))
return encoded_inputs
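# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file; assumes the public
# LEDTokenizerFast API). The `_pad` override above extends an extra
# "global_attention_mask" entry with -1, because 0 already means
# "local attention" rather than "do not attend":
# ---------------------------------------------------------------------------
from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
encoded = tokenizer(["a short text", "a noticeably longer piece of text to encode"])
# request global attention on the first token of every sequence
encoded["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in encoded["input_ids"]]
padded = tokenizer.pad(encoded, padding="longest")
# the shorter sequence's global_attention_mask is now right-padded with -1
print(padded["global_attention_mask"][0])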
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
lowercase_ = chkpt["""model"""]
# We have the base model one level deeper than the original XLM repository
lowercase_ = {}
for k, v in state_dict.items():
if "pred_layer" in k:
lowercase_ = v
else:
lowercase_ = v
lowercase_ = chkpt["""params"""]
lowercase_ = {n: v for n, v in config.items() if not isinstance(__lowerCAmelCase , (torch.FloatTensor, numpy.ndarray) )}
lowercase_ = chkpt["""dico_word2id"""]
lowercase_ = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""" , """""" ): i for s, i in vocab.items()}
# Save pytorch-model
lowercase_ = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
lowercase_ = pytorch_dump_folder_path + """/""" + CONFIG_NAME
lowercase_ = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(__lowerCAmelCase , indent=2 ) + """\n""" )
print(F'''Save vocab file to {pytorch_vocab_dump_path}''' )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(__lowerCAmelCase , indent=2 ) + """\n""" )
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase : int = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
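# Example invocation (a sketch; the script and checkpoint filenames below are
# hypothetical):
#
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#     --xlm_checkpoint_path ./mlm_en_2048.pth \
#     --pytorch_dump_folder_path ./xlm-converted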
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase : Union[str, Any] = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
UpperCAmelCase : List[Any] = 10
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
'''simple docstring'''
for i in range(__lowerCAmelCase , __lowerCAmelCase ):
if array[i] == target:
return i
return -1
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = 0
lowercase_ = len(__lowerCAmelCase )
while left <= right:
if right - left < precision:
return lin_search(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase_ = (left + right) // 3 + 1
lowercase_ = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
lowercase_ = one_third - 1
elif array[two_third] < target:
lowercase_ = two_third + 1
else:
lowercase_ = one_third + 1
lowercase_ = two_third - 1
else:
return -1
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
'''simple docstring'''
if left < right:
if right - left < precision:
return lin_search(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase_ = (left + right) // 3 + 1
lowercase_ = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(__lowerCAmelCase , one_third - 1 , __lowerCAmelCase , __lowerCAmelCase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , __lowerCAmelCase , __lowerCAmelCase )
else:
return -1
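# Sanity-check sketch (assumes the three functions above keep their original
# names lin_search, ite_ternary_search and rec_ternary_search, as used in the
# __main__ block below). With the precision set to 10, arrays shorter than
# ten elements fall straight through to the linear scan:
#
#   ite_ternary_search([1, 3, 5, 7, 9], 7)        # -> 3
#   rec_ternary_search(0, 4, [1, 3, 5, 7, 9], 7)  # -> 3
#   ite_ternary_search([1, 3, 5, 7, 9], 4)        # -> -1 (not present)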
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase : Optional[int] = input("Enter numbers separated by comma:\n").strip()
UpperCAmelCase : Dict = [int(item.strip()) for item in user_input.split(",")]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
UpperCAmelCase : Tuple = int(input("Enter the number to be found in the list:\n").strip())
UpperCAmelCase : str = ite_ternary_search(collection, target)
UpperCAmelCase : List[str] = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F"Iterative search: {target} found at positions: {resulta}")
print(F"Recursive search: {target} found at positions: {resulta}")
else:
print("Not found")
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any]=1_3 , lowerCAmelCase_ : Optional[Any]=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Tuple=9_9 , lowerCAmelCase_ : List[str]=6_4 , lowerCAmelCase_ : Optional[int]=3_2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=3_7 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : int=5_1_2 , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : List[Any]=0.02 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Dict=None , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = embedding_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length])
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase_ = ids_tensor([self.batch_size] , self.num_choices)
lowercase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
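        # A compact alternative to the elementwise loop above (a sketch, assuming the
        # same module-level TOLERANCE; not part of the original test):
        # expected_slice = torch.tensor(expected, device=output.device).reshape(3, 3)
        # assert torch.allclose(output[0, :3, :3].float(), expected_slice.float(), rtol=TOLERANCE, atol=TOLERANCE)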
| 313
| 1
|
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
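# These checks are written in pytest style; assuming the repository layout used in
# the paths above, a typical invocation would be:
#   python -m pytest digital_image_processing/test_digital_image_processing.py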
| 313
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
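    # Quick usage sketch (illustrative matrix, not from the original source):
    # the product of the two factors should reproduce the input table.
    example_table = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(example_table)
    assert np.allclose(lower @ upper, example_table)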
| 313
| 1
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
UpperCAmelCase : Any = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
UpperCAmelCase : str = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
UpperCAmelCase : str = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string"""), """prediction_text""": datasets.Value("""string""")},
"""references""": {
"""id""": datasets.Value("""string"""),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string"""),
"""answer_start""": datasets.Value("""int32"""),
}),
},
}) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 313
|
"""simple docstring"""
import os
from collections.abc import Iterator
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = "." ) -> Iterator[str]:
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(__lowerCAmelCase ):
lowercase_ = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__lowerCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(__lowerCAmelCase , __lowerCAmelCase ).lstrip("""./""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return F'''{i * " "}*''' if i else "\n##"
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(__lowerCAmelCase ) or old_parts[i] != new_part) and new_part:
print(F'''{md_prefix(__lowerCAmelCase )} {new_part.replace("_" , " " ).title()}''' )
return new_path
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = "." ) -> None:
'''simple docstring'''
lowercase_ = """"""
for filepath in sorted(good_file_paths(__lowerCAmelCase ) ):
lowercase_ , lowercase_ = os.path.split(__lowerCAmelCase )
if filepath != old_path:
lowercase_ = print_path(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = (filepath.count(os.sep ) + 1) if filepath else 0
lowercase_ = F'''{filepath}/{filename}'''.replace(""" """ , """%20""" )
lowercase_ = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(F'''{md_prefix(__lowerCAmelCase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md(".")
| 313
| 1
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import work (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
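# Usage sketch (assuming this module is importable as `get_test_info` and that it is
# run from a transformers-style repository root; the test-file path is illustrative):
# from get_test_info import get_test_to_tester_mapping, to_json
# mapping = get_test_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
# print(to_json(mapping))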
| 313
|
"""simple docstring"""
def bead_sort(sequence: list) -> list:
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 313
| 1
|
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
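# Configuration sketch (illustrative values; `dual_transformer` is a hypothetical
# instance of the module above): a pipeline mixing a 77-token text condition with a
# 257-token image-embedding condition can tune the blend at inference time like so.
# dual_transformer.mix_ratio = 0.7                # weight the first output more
# dual_transformer.condition_lengths = [77, 257]  # tokens per condition
# dual_transformer.transformer_index_for_condition = [1, 0]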
| 313
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Tuple = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        generation_kwargs = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        tok_decode_kwargs = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **generation_kwargs)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances, **tok_decode_kwargs)
        assert generated_txt[0].strip() == tgt_text
| 313
| 1
|
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    ugly_nums = [1]

    i2 = i3 = i5 = 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(200) = }")
| 313
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
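# Usage sketch (hypothetical corpus; requires a `datasets.Dataset` with "content",
# "repo_name" and "path" columns, e.g. a codeparrot-style code dataset):
# from datasets import load_dataset
# ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
# ds_dedup, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)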
| 313
| 1
|
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
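# Usage sketch (illustrative shapes only, not part of the original module): project a
# 32-dimensional decoder state to Student-T parameters and build a scaled distribution.
# student_t = StudentTOutput(dim=1)
# proj = student_t.get_parameter_projection(in_features=32)
# distr_args = proj(torch.randn(8, 32))  # (df, loc, scale), each of shape (8,)
# distr = student_t.distribution(distr_args, loc=torch.zeros(8), scale=torch.ones(8))
# sample = distr.sample()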
| 313
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    # the dataset name (e.g. "aeslc") is taken from the checkpoint's parent directory
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase : List[Any] = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase : List[str] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase : int = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
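# Example invocation (hypothetical local checkpoint path; the fine-tuned TF
# checkpoint must already exist on disk):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc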
| 313
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
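# How the lazy pattern behaves at runtime (a sketch, not part of this file):
# importing the package only materializes `_import_structure`; the heavy
# submodule import happens on first attribute access, e.g.
#
#   from transformers.models import mobilebert
#   model_cls = mobilebert.MobileBertModel  # modeling_mobilebert is imported here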
| 313
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental Sieve of Eratosthenes, lazily yielding 2, 3, 5, 7, ..."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: slide its recorded factor forward to the
            # next unclaimed multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime: the first composite it is responsible for is
            # its square.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """
    Returns the least n for which the remainder of (p_n - 1)^n + (p_n + 1)^n
    divided by p_n^2 first exceeds `limit`, where p_n is the nth prime.
    """
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
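# Why `2 * prime * n` is the remainder (a short derivation, not in the original
# file): expanding (p - 1)^n + (p + 1)^n with the binomial theorem, every term
# carrying p^2 or higher vanishes modulo p^2. For odd n the constant terms
# cancel and the linear terms add up to 2 * n * p; for even n the remainder is
# just 2, which is why the loop above advances n in steps of 2 and skips every
# other prime.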
| 313
| 1
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parses the command line options for the TPU launch helper."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
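# Example invocation (hypothetical script name and arguments):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
# The launched script must expose an `_mp_fn(index)` entry point, which
# `xmp.spawn` invokes once per TPU core.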
| 313
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """
    Circular FIFO queue with a fixed capacity, backed by a doubly linked list.

    >>> queue = CircularQueueLinkedList(2)
    >>> queue.enqueue(1)
    >>> queue.enqueue(2)
    >>> queue.dequeue()
    1
    """

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        """Returns the first element of the queue without removing it."""
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
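# Usage sketch (not part of the original module): the queue reuses its fixed
# ring of nodes instead of allocating on every enqueue.
#
#   q = CircularQueueLinkedList(3)
#   q.enqueue("a"); q.enqueue("b")
#   q.dequeue()                # -> "a"
#   q.enqueue("c"); q.enqueue("d")
#   q.dequeue()                # -> "b"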
| 313
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCAmelCase : List[str] = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict = ["DPTFeatureExtractor"]
UpperCAmelCase : int = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 313
|
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: the maximum sum over all contiguous subarrays.

    >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6
    """
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
| 313
| 1
|
"""simple docstring"""
from __future__ import annotations
UpperCAmelCase : Any = "Muhammad Umer Farooq"
UpperCAmelCase : Union[str, Any] = "MIT"
UpperCAmelCase : Tuple = "1.0.0"
UpperCAmelCase : Tuple = "Muhammad Umer Farooq"
UpperCAmelCase : str = "contact@muhammadumerfarooq.me"
UpperCAmelCase : Union[str, Any] = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , lowerCAmelCase_ : str):
"""simple docstring"""
super().__init__()
lowercase_ = []
lowercase_ = domain
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : list[tuple[str, str | None]]):
"""simple docstring"""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
lowercase_ = parse.urljoin(self.domain , lowerCAmelCase_)
self.urls.append(lowerCAmelCase_)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str:
'''simple docstring'''
return ".".join(get_sub_domain_name(__lowerCAmelCase ).split(""".""" )[-2:] )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str:
'''simple docstring'''
return parse.urlparse(__lowerCAmelCase ).netloc
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = "https://github.com" ) -> list[str]:
'''simple docstring'''
lowercase_ = get_domain_name(__lowerCAmelCase )
# Initialize the parser
lowercase_ = Parser(__lowerCAmelCase )
try:
# Open URL
lowercase_ = requests.get(__lowerCAmelCase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
lowercase_ = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
lowercase_ = requests.get(__lowerCAmelCase )
# Get the valid email.
lowercase_ = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__lowerCAmelCase )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = emails_from_url("https://github.com")
print(F"{len(emails)} emails found:")
print("\n".join(sorted(emails)))
| 313
|
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
UpperCAmelCase : Optional[int] = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        # Run a single encoder layer instead of the full stack.
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            # Training: every layer's classifier contributes a prediction.
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            # Patience-based early exit: stop once `patience` consecutive
            # internal classifiers agree on the prediction.
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)

                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
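# How PABEE early exit behaves at inference (a sketch under the assumption of
# a checkpoint fine-tuned with the per-layer classifiers above; `path` is
# hypothetical):
#
#   model = BertForSequenceClassificationWithPabee.from_pretrained(path)
#   model.bert.set_patience(3)
#   model.bert.reset_stats()
#   logits = model(**inputs)[0]
#   model.bert.log_stats()   # reports average exit layer and resulting speed-up
#
# Inference stops at the first layer whose classifier agrees with the previous
# `patience` layers' predictions, trading a small accuracy delta for speed.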
| 313
| 1
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Shortest path on a binary grid, where cells equal to 1 are passable."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
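# Usage sketch (not part of the original module): free cells are 1, blocked
# cells 0.
#
#   grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
#   dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#   # dist == 4.0; one 4-step path is [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]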
if __name__ == "__main__":
import doctest
doctest.testmod()
| 313
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            curr_max_len = min(end - start, max_word_len)
            for i in range(curr_max_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer, bert_tokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
UpperCAmelCase : int = parser.parse_args()
main(args)
| 313
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 313
|
"""simple docstring"""
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """
    Returns the first `nth_term` terms of the P-Series
    1, 1/2^p, 1/3^p, ..., 1/n^p as strings.
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
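# Example (not in the original file): p_series(5, 2) returns
# ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'], i.e. the first five terms of
# the p-series with p = 2, whose infinite sum converges to pi^2 / 6.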
| 313
| 1
|
"""simple docstring"""
def count_divisors(n: int) -> int:
    """Counts the divisors of `n` from its prime factorization: the divisor
    count is the product of (multiplicity + 1) over all prime factors."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Returns the first triangular number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
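# A common speed-up, sketched here as an addition (it is not used in the
# solution above): the nth triangular number is n * (n + 1) / 2, and n and
# n + 1 are coprime, so its divisor count factors into the divisor counts of
# the two (halved) factors, keeping the numbers passed to count_divisors small.
def count_divisors_triangular(n: int) -> int:
    a, b = n, n + 1
    if a % 2 == 0:
        a //= 2
    else:
        b //= 2
    return count_divisors(a) * count_divisors(b)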
| 313
|
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self) -> None:
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        """test for method __str__()"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        """test for method __len__()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        """test for + operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        """test for - operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """test for * operator (scalar and dot product)"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        """test for Matrix method __str__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        """test for Matrix * operator"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self) -> None:
        """test for Matrix + operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self) -> None:
        """test for Matrix - operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        """test for global function square_zero_matrix()"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )
if __name__ == "__main__":
unittest.main()
| 313
| 1
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
UpperCAmelCase : Tuple = open # noqa: we just need to have a builtin inside this module to test it properly
| 313
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        # choose a random pivot and move it to the end
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
| 313
| 1
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : List[str] = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self) -> dict:
        """
        Serializes this instance to a Python dictionary, also serializing the
        nested backbone config when present.
        """
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
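# Usage sketch (not part of the original module): instantiating the config and
# reading a mapped attribute.
#
#   config = DeformableDetrConfig(num_queries=100, with_box_refine=True, two_stage=True)
#   config.hidden_size            # -> 256, aliased to d_model via attribute_map
#   config.num_attention_heads    # -> 8, aliased to encoder_attention_heads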
| 313
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # the projection head is used in the self-supervised MSN pre-training and
    # is not needed for downstream tasks
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = ViTMSNConfig()
lowercase_ = 10_00
lowercase_ = """datasets/huggingface/label-files"""
lowercase_ = """imagenet-1k-id2label.json"""
lowercase_ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase ) , """r""" ) )
    lowercase_ = {int(k ): v for k, v in idalabel.items()}
lowercase_ = idalabel
lowercase_ = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowercase_ = 3_84
lowercase_ = 15_36
lowercase_ = 6
elif "l16" in checkpoint_url:
lowercase_ = 10_24
lowercase_ = 40_96
lowercase_ = 24
lowercase_ = 16
lowercase_ = 0.1
elif "b4" in checkpoint_url:
lowercase_ = 4
elif "l7" in checkpoint_url:
lowercase_ = 7
lowercase_ = 10_24
lowercase_ = 40_96
lowercase_ = 24
lowercase_ = 16
lowercase_ = 0.1
lowercase_ = ViTMSNModel(__lowerCAmelCase )
lowercase_ = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="""cpu""" )["""target_encoder"""]
lowercase_ = ViTImageProcessor(size=config.image_size )
remove_projection_head(__lowerCAmelCase )
lowercase_ = create_rename_keys(__lowerCAmelCase , base_model=__lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , base_model=__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
lowercase_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase_ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
lowercase_ = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
lowercase_ = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
lowercase_ = model(**__lowerCAmelCase )
lowercase_ = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase_ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
lowercase_ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
lowercase_ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
lowercase_ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
lowercase_ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __lowerCAmelCase , atol=1E-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase : Tuple = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
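# A hedged usage sketch: the converter can also be called directly in Python
# with the default checkpoint URL; the output folder name here is hypothetical.
#
#   convert_vit_msn_checkpoint(
#       "https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
#       "./vit-msn-small",
#   )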
| 313
| 1
|
"""simple docstring"""
from __future__ import annotations
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ , lowercase_ = text, pattern
lowercase_ , lowercase_ = len(lowerCAmelCase_), len(lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : str):
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1):
if char == self.pattern[i]:
return i
return -1
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : int):
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = []
for i in range(self.textLen - self.patLen + 1):
lowercase_ = self.mismatch_in_text(lowerCAmelCase_)
if mismatch_index == -1:
positions.append(lowerCAmelCase_)
else:
lowercase_ = self.match_in_pattern(self.text[mismatch_index])
lowercase_ = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
UpperCAmelCase : List[Any] = "ABAABA"
UpperCAmelCase : str = "AB"
UpperCAmelCase : Dict = BoyerMooreSearch(text, pattern)
UpperCAmelCase : List[Any] = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
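# For intuition, the bad-character rule can be stated on its own. A full
# Boyer-Moore search would also use a good-suffix rule, which the class
# above does not implement; this helper is a minimal sketch.
def bad_char_shift(pattern: str, j: int, c: str) -> int:
    # On a mismatch at pattern index j against text character c, shift so
    # the rightmost occurrence of c in pattern[:j] lines up with c.
    for i in range(j - 1, -1, -1):
        if pattern[i] == c:
            return j - i
    return j + 1  # c does not occur: shift the whole prefix past it

assert bad_char_shift("AB", 1, "A") == 1
assert bad_char_shift("AB", 0, "C") == 1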
| 313
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "perceiver"
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str]=2_5_6 , lowerCAmelCase_ : Dict=1_2_8_0 , lowerCAmelCase_ : List[Any]=7_6_8 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : List[Any]=2_6 , lowerCAmelCase_ : Optional[Any]=8 , lowerCAmelCase_ : Tuple=8 , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Optional[Any]="kv" , lowerCAmelCase_ : Dict=1 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : List[Any]=1E-12 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=2_6_2 , lowerCAmelCase_ : Union[str, Any]=2_0_4_8 , lowerCAmelCase_ : Any=5_6 , lowerCAmelCase_ : int=[3_6_8, 4_9_6] , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : Dict=1_9_2_0 , lowerCAmelCase_ : Optional[Any]=1_6 , lowerCAmelCase_ : Tuple=[1, 1_6, 2_2_4, 2_2_4] , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
lowercase_ = num_latents
lowercase_ = d_latents
lowercase_ = d_model
lowercase_ = num_blocks
lowercase_ = num_self_attends_per_block
lowercase_ = num_self_attention_heads
lowercase_ = num_cross_attention_heads
lowercase_ = qk_channels
lowercase_ = v_channels
lowercase_ = cross_attention_shape_for_attention
lowercase_ = self_attention_widening_factor
lowercase_ = cross_attention_widening_factor
lowercase_ = hidden_act
lowercase_ = attention_probs_dropout_prob
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = use_query_residual
# masked language modeling attributes
lowercase_ = vocab_size
lowercase_ = max_position_embeddings
# image classification attributes
lowercase_ = image_size
# flow attributes
lowercase_ = train_size
# multimodal autoencoding attributes
lowercase_ = num_frames
lowercase_ = audio_samples_per_frame
lowercase_ = samples_per_patch
lowercase_ = output_shape
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
@property
def _UpperCAmelCase ( self : str):
"""simple docstring"""
if self.task == "multiple-choice":
lowercase_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
])
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return 1E-4
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[TensorType] = None , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 4_0 , lowerCAmelCase_ : int = 4_0 , ):
"""simple docstring"""
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase_ = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase_ = preprocessor.num_special_tokens_to_add(lowerCAmelCase_)
lowercase_ = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase_)
# Generate dummy inputs according to compute batch and sequence
lowercase_ = [""" """.join(["""a"""]) * seq_length] * batch_size
lowercase_ = dict(preprocessor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_))
lowercase_ = inputs.pop("""input_ids""")
return inputs
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase_ = compute_effective_axis_dimension(lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch)
lowercase_ = self._generate_dummy_images(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = dict(preprocessor(images=lowerCAmelCase_ , return_tensors=lowerCAmelCase_))
lowercase_ = inputs.pop("""pixel_values""")
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""")
| 313
| 1
|
"""simple docstring"""
UpperCAmelCase : str = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = 0
while number:
        # Slightly faster: process five digits at a time via the precomputed table.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
# Two chains exist. One ends in 89; declaring chain member 58 first minimizes
# the number of iterations needed to classify all remaining members.
# The other chain ends in 1 and contains only the element 1.
# So 58 and 1 are seeded before the search starts.
# Changed dictionary to an array to quicken the solution
UpperCAmelCase : list[bool | None] = [None] * 1000_0000
UpperCAmelCase : str = True
UpperCAmelCase : List[Any] = False
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> bool:
'''simple docstring'''
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowercase_ = chain(next_number(__lowerCAmelCase ) )
lowercase_ = number_chain
while number < 10_00_00_00:
lowercase_ = number_chain
number *= 10
return number_chain
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = 10_00_00_00 ) -> int:
'''simple docstring'''
for i in range(1 , __lowerCAmelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }")
| 313
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = BarthezTokenizer
lowercase__ = BarthezTokenizerFast
lowercase__ = True
lowercase__ = True
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
super().setUp()
lowercase_ = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""")
tokenizer.save_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCAmelCase_)
lowercase_ = tokenizer
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = """<pad>"""
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_) , lowerCAmelCase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_) , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(vocab_keys[-1] , """<mask>""")
self.assertEqual(len(lowerCAmelCase_) , 1_0_1_1_2_2)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2)
@require_torch
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowercase_ = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
lowercase_ = self.tokenizer(
lowerCAmelCase_ , max_length=len(lowerCAmelCase_) , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="""pt""")
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
self.assertEqual((2, 6) , batch.input_ids.shape)
self.assertEqual((2, 6) , batch.attention_mask.shape)
lowercase_ = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = """I was born in 92000, and this is falsé."""
lowercase_ = tokenizer.tokenize(lowerCAmelCase_)
lowercase_ = rust_tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
lowercase_ = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(lowerCAmelCase_)
lowercase_ = rust_tokenizer.encode(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
@slow
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
lowercase_ = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=lowerCAmelCase_ , )
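# Outside the unittest harness, the same tokenizer can be exercised directly.
# A minimal sketch; token ids are not asserted because they depend on the
# released vocabulary.
from transformers import BarthezTokenizerFast

barthez_tok = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
barthez_batch = barthez_tok(["Un exemple court."], padding=True, return_tensors="pt")
print(barthez_batch.input_ids.shape, barthez_batch.attention_mask.shape)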
| 313
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = 42
lowercase__ = None
lowercase__ = None
UpperCAmelCase : List[str] = namedtuple("CoinsDistribResult", "moves excess")
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(__lowerCAmelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(__lowerCAmelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_UpperCAmelCase ) != count_coins(_UpperCAmelCase ):
raise ValueError("""The nodes number should be same as the number of coins""" )
# Main calculation
def get_distrib(__lowerCAmelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
        lowercase_ , lowercase_ = get_distrib(node.left )
        lowercase_ , lowercase_ = get_distrib(node.right )
lowercase_ = 1 - left_distrib_excess
lowercase_ = 1 - right_distrib_excess
lowercase_ = (
left_distrib_moves
+ right_distrib_moves
+ abs(_UpperCAmelCase )
+ abs(_UpperCAmelCase )
)
lowercase_ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_UpperCAmelCase , _UpperCAmelCase )
return get_distrib(_UpperCAmelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
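# A worked instance of the recurrence, re-implemented in miniature. The Node
# class is a stand-in for the anonymized dataclass above.
class Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def min_moves(node):
    # Mirrors get_distrib: returns (moves, excess); a missing child reports
    # excess 1 so its parent sends it nothing.
    if node is None:
        return 0, 1
    left_moves, left_excess = min_moves(node.left)
    right_moves, right_excess = min_moves(node.right)
    to_left, to_right = 1 - left_excess, 1 - right_excess
    moves = left_moves + right_moves + abs(to_left) + abs(to_right)
    return moves, node.data - to_left - to_right

# Root holds 3 coins, both children hold 0: one coin travels down each edge.
assert min_moves(Node(3, Node(0), Node(0)))[0] == 2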
| 350
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with OOM errors on GPU. The setting below makes
    # JAX allocate exactly what is needed on demand and deallocate memory that is
    # no longer needed, at the cost of some speed, as explained at
    # https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Optional[Any] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class SCREAMING_SNAKE_CASE__ :
lowercase__ = PegasusConfig
lowercase__ = {}
lowercase__ = "gelu"
def __init__( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any]=1_3 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : str=9_9 , lowerCAmelCase_ : Tuple=3_2 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Dict=3_7 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Optional[int]=2_0 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : Optional[Any]=0 , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = eos_token_id
lowercase_ = pad_token_id
lowercase_ = bos_token_id
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size)
lowercase_ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1)
lowercase_ = np.concatenate([input_ids, eos_tensor] , axis=1)
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowercase_ = prepare_pegasus_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
return config, inputs_dict
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""")
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Optional[Any]:
'''simple docstring'''
if attention_mask is None:
lowercase_ = np.not_equal(__lowerCAmelCase , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
lowercase_ = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
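# A toy numpy check of the decoder-mask rule above: position 0 is always
# attended, later positions are masked wherever the id equals the pad id
# (assumed to be 0 for this illustration).
import numpy as np

toy_decoder_ids = np.array([[5, 7, 0, 0]])
toy_mask = np.concatenate(
    [
        np.ones(toy_decoder_ids[:, :1].shape, dtype=np.int8),
        np.not_equal(toy_decoder_ids[:, 1:], 0).astype(np.int8),
    ],
    axis=-1,
)
assert toy_mask.tolist() == [[1, 1, 0, 0]]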
@require_flax
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowercase__ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowercase__ = True
lowercase__ = False
lowercase__ = False
lowercase__ = False
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = FlaxPegasusModelTester(self)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = model_class(lowerCAmelCase_)
@jax.jit
def encode_jitted(lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int]=None , **lowerCAmelCase_ : Optional[int]):
return model.encode(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
with self.subTest("""JIT Enabled"""):
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""])
lowercase_ = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict):
return model.decode(
decoder_input_ids=lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , encoder_outputs=lowerCAmelCase_ , )
with self.subTest("""JIT Enabled"""):
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase_ = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowerCAmelCase_)
lowercase_ = np.ones((1, 1))
lowercase_ = model(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
@slow
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""")
lowercase_ = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""")
lowercase_ = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
lowercase_ = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
lowercase_ = tokenizer(lowerCAmelCase_ , return_tensors="""np""" , truncation=lowerCAmelCase_ , max_length=5_1_2 , padding=lowerCAmelCase_)
lowercase_ = model.generate(**lowerCAmelCase_ , num_beams=2).sequences
lowercase_ = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_)
assert tgt_text == decoded
| 313
| 0
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase : Any = 16
UpperCAmelCase : Dict = 32
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase = 16 , __lowerCAmelCase = "bert-base-cased" ) -> Union[str, Any]:
lowercase_ = AutoTokenizer.from_pretrained(lowercase__ )
lowercase_ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase_ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase_ = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase_ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowercase_ = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowercase_ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
model.eval()
lowercase_ = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase_ = model(**lowercase__ )
lowercase_ = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
lowercase_ = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
lowercase_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowercase_ = metric.compute()
return eval_metric["accuracy"]
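# The trimming above exists because distributed samplers pad the final batch
# with duplicated samples. A minimal numpy sketch of the same bookkeeping,
# with toy sizes and no Accelerate dependency.
import numpy as np

dataset_len = 5
gathered = [np.array([0, 1]), np.array([2, 3]), np.array([4, 0])]  # last batch padded
samples_seen, kept = 0, []
for step, batch in enumerate(gathered):
    if step == len(gathered) - 1:
        batch = batch[: dataset_len - samples_seen]  # drop the duplicated tail
    else:
        samples_seen += batch.shape[0]
    kept.append(batch)
assert np.concatenate(kept).tolist() == [0, 1, 2, 3, 4]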
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Any:
lowercase_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase_ = config["""lr"""]
lowercase_ = int(config["""num_epochs"""] )
lowercase_ = int(config["""seed"""] )
lowercase_ = int(config["""batch_size"""] )
lowercase_ = args.model_name_or_path
set_seed(lowercase__ )
lowercase_ = get_dataloaders(lowercase__ , lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase_ = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
lowercase_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase_ = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
lowercase_ = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowercase_ = 1
lowercase_ = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase_ = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
lowercase_ = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase_ = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
lowercase_ = 0
    # We also need to keep track of the starting epoch so files are named properly
lowercase_ = 0
lowercase_ = evaluate.load("""glue""" , """mrpc""" )
lowercase_ = num_epochs
if args.partial_train_epoch is not None:
lowercase_ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowercase_ = args.resume_from_checkpoint.split("""epoch_""" )[1]
lowercase_ = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowercase_ = int(lowercase__ ) + 1
lowercase_ = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.print("""resumed checkpoint performance:""" , lowercase__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , """r""" ) as f:
lowercase_ = json.load(lowercase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowercase_ = {}
for epoch in range(lowercase__ , lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
lowercase_ = model(**lowercase__ )
lowercase_ = outputs.loss
lowercase_ = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowercase_ = F'''epoch_{epoch}'''
lowercase_ = os.path.join(args.output_dir , lowercase__ )
accelerator.save_state(lowercase__ )
lowercase_ = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowercase_ = accuracy
lowercase_ = lr_scheduler.get_lr()[0]
lowercase_ = optimizer.param_groups[0]["""lr"""]
lowercase_ = epoch
lowercase_ = overall_step
accelerator.print(F'''epoch {epoch}:''' , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def _SCREAMING_SNAKE_CASE () -> int:
lowercase_ = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=lowercase__ , default=lowercase__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=lowercase__ , default=lowercase__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=2 , help="""Number of train epochs.""" , )
lowercase_ = parser.parse_args()
lowercase_ = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
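# A standalone sketch of the epoch parsing used on resume above; the folder
# name is purely illustrative.
def starting_epoch_from(path: str) -> int:
    digits = ""
    for ch in path.split("epoch_")[1]:
        if ch.isdigit():
            digits += ch
        else:
            break
    return int(digits) + 1  # resume with the epoch after the saved one

assert starting_epoch_from("outputs/epoch_3") == 4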
| 351
|
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = None
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = True
lowercase__ = None
lowercase__ = 1
lowercase__ = None
lowercase__ = False
lowercase__ = None
lowercase__ = None
def _UpperCAmelCase ( self : int):
"""simple docstring"""
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 313
| 0
|
"""simple docstring"""
import string
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = ''''''
for i in sequence:
        lowercase_ = ord(i )
if 65 <= extract <= 90:
output += chr(1_55 - extract )
elif 97 <= extract <= 1_22:
output += chr(2_19 - extract )
else:
output += i
return output
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = string.ascii_letters
lowercase_ = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(__UpperCAmelCase )] if c in letters else c for c in sequence )
def _SCREAMING_SNAKE_CASE () -> None:
'''simple docstring'''
from timeit import timeit
print("""Running performance benchmarks...""" )
lowercase_ = '''from string import printable ; from __main__ import atbash, atbash_slow'''
print(F'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=__UpperCAmelCase )} seconds''' )
print(F'''> atbash(): {timeit("atbash(printable)" , setup=__UpperCAmelCase )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"{example} encrypted in atbash: {atbash(example)}")
benchmark()
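# For comparison, the same cipher can be driven entirely by str.maketrans; a
# sketch equivalent to atbash() above.
import string

ATBASH_TABLE = str.maketrans(
    string.ascii_letters,
    string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1],
)
assert "ABC abc".translate(ATBASH_TABLE) == "ZYX zyx"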
| 352
|
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase : Any = logging.get_logger(__name__)
UpperCAmelCase : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase : List[Any] = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
UpperCAmelCase : Union[str, Any] = {
"allenai/led-base-16384": 1_6384,
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = LEDTokenizer
lowercase__ = ["input_ids", "attention_mask"]
def __init__( self : Dict , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]="replace" , lowerCAmelCase_ : Dict="<s>" , lowerCAmelCase_ : Union[str, Any]="</s>" , lowerCAmelCase_ : List[Any]="</s>" , lowerCAmelCase_ : Optional[Any]="<s>" , lowerCAmelCase_ : Union[str, Any]="<unk>" , lowerCAmelCase_ : List[str]="<pad>" , lowerCAmelCase_ : Dict="<mask>" , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]=True , **lowerCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(
lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space:
lowercase_ = getattr(lowerCAmelCase_ , pre_tok_state.pop("""type"""))
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**lowerCAmelCase_)
lowercase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase_ = """post_processor"""
lowercase_ = getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_)
if tokenizer_component_instance:
lowercase_ = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ = tuple(state["""sep"""])
if "cls" in state:
lowercase_ = tuple(state["""cls"""])
lowercase_ = False
if state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space:
lowercase_ = add_prefix_space
lowercase_ = True
if state.get("""trim_offsets""" , lowerCAmelCase_) != trim_offsets:
lowercase_ = trim_offsets
lowercase_ = True
if changes_to_apply:
lowercase_ = getattr(lowerCAmelCase_ , state.pop("""type"""))
lowercase_ = component_class(**lowerCAmelCase_)
setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_)
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""")
return None
return str(self._mask_token)
@mask_token.setter
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else value
lowercase_ = value
def _UpperCAmelCase ( self : Dict , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = kwargs.get("""is_split_into_words""" , lowerCAmelCase_)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""")
return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Any):
"""simple docstring"""
lowercase_ = kwargs.get("""is_split_into_words""" , lowerCAmelCase_)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""")
return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_)
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None):
"""simple docstring"""
lowercase_ = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_)
return tuple(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]=None):
"""simple docstring"""
lowercase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , ):
"""simple docstring"""
lowercase_ = super()._pad(
encoded_inputs=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding_strategy=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
# Load from model defaults
if return_attention_mask is None:
lowercase_ = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase_ = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
lowercase_ = len(encoded_inputs["""global_attention_mask"""]) != len(lowerCAmelCase_)
if needs_to_be_padded:
lowercase_ = len(lowerCAmelCase_) - len(encoded_inputs["""global_attention_mask"""])
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase_ = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
lowercase_ = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side))
return encoded_inputs
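# A minimal list-based sketch of the global-attention padding rule above;
# -1 is used as the pad value because 0 already means "local attention".
def pad_global_mask(mask, target_len, side="right"):
    diff = target_len - len(mask)
    return (mask + [-1] * diff) if side == "right" else ([-1] * diff + mask)

assert pad_global_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert pad_global_mask([1, 0, 0], 5, side="left") == [-1, -1, 1, 0, 0]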
| 313
| 0
|
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCAmelCase : Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
UpperCAmelCase : Union[str, Any] = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/"""))
lowercase_ = self.transformer_dir
shutil.copy(
os.path.join(__snake_case , """src/transformers/models/bert/modeling_bert.py""") , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""") , )
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = """src/transformers"""
shutil.rmtree(self.transformer_dir)
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple=None):
"""simple docstring"""
lowercase_ = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowercase_ = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
lowercase_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9)
lowercase_ = black.format_str(__snake_case , mode=__snake_case)
lowercase_ = os.path.join(self.transformer_dir , """new_code.py""")
with open(__snake_case , """w""" , newline="""\n""") as f:
f.write(__snake_case)
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__snake_case)) == 0)
else:
check_copies.is_copy_consistent(f.name , overwrite=__snake_case)
with open(__snake_case , """r""") as f:
self.assertTrue(f.read() , __snake_case)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""")
self.assertEqual(__snake_case , __snake_case)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , __snake_case , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , __snake_case) , )
# Copy consistency with a really long name
lowercase_ = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("""Bert""" , __snake_case , __snake_case) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , __snake_case , overwrite_result=re.sub("""Bert""" , """TestModel""" , __snake_case) , )
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
lowercase_ = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
lowercase_ = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
lowercase_ = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list , localized_md_list , localized_readme["""format_model_list"""])
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list , converted_localized_md_list)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list , converted_localized_md_list , localized_readme["""format_model_list"""])
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list , link_unchanged_md_list , localized_readme["""format_model_list"""])
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list , converted_md_list_sample)
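        # Illustrative note (not from the original test): convert_to_localized_md
        # returns (num_models_equal, converted_md_list); the boolean is False
        # whenever the localized README is missing models from the English one.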
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
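
# Usage sketch (illustrative, not part of the original module): with the lazy
# module in place, `from transformers.models.upernet import UperNetConfig`
# resolves the symbol on first attribute access, without eagerly importing the
# torch-dependent modeling code.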
"""simple docstring"""
def gnome_sort(lst: list) -> list:
    """Sort a list in place with the gnome sort algorithm and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # Swap the out-of-order neighbours and step back one position.
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase : str = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
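    # Quick self-check (assumed example values, not part of the original script).
    assert gnome_sort([3, 1, 2]) == [1, 2, 3]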
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Wrap a nested list of token ids into a long tensor on the test device."""
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
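
# Illustrative note (assumed example, not from the original file):
# _long_tensor([[101, 102]]) yields a tensor of shape (1, 2) on `torch_device`.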
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("""Model is not available.""")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> "PretrainedConfig":
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Decompose a square matrix into a lower and an upper triangular matrix."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
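
# Minimal usage sketch (assumed example matrix, not part of the original module).
# For this Doolittle-style factorization without pivoting, lower @ upper
# reconstructs the input up to floating-point error:
if __name__ == "__main__":
    _example = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    _lower, _upper = lower_upper_decomposition(_example)
    assert np.allclose(_lower @ _upper, _example)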
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
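
# Usage sketch (assumed example, for illustration only): with the defaults
# above, an arbitrary RGB image is resized so its short side is 256, centre
# cropped to 224x224, rescaled to [0, 1] and normalised:
#
#     from PIL import Image
#     processor = MobileNetV2ImageProcessor()
#     batch = processor.preprocess(Image.new("RGB", (640, 480)), return_tensors="np")
#     assert batch["pixel_values"].shape == (1, 3, 224, 224)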
"""simple docstring"""
import os
from collections.abc import Iterator
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = "." ) -> Iterator[str]:
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(__lowerCAmelCase ):
lowercase_ = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__lowerCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(__lowerCAmelCase , __lowerCAmelCase ).lstrip("""./""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return F'''{i * " "}*''' if i else "\n##"
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(__lowerCAmelCase ) or old_parts[i] != new_part) and new_part:
print(F'''{md_prefix(__lowerCAmelCase )} {new_part.replace("_" , " " ).title()}''' )
return new_path
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = "." ) -> None:
'''simple docstring'''
lowercase_ = """"""
for filepath in sorted(good_file_paths(__lowerCAmelCase ) ):
lowercase_ , lowercase_ = os.path.split(__lowerCAmelCase )
if filepath != old_path:
lowercase_ = print_path(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = (filepath.count(os.sep ) + 1) if filepath else 0
lowercase_ = F'''{filepath}/{filename}'''.replace(""" """ , """%20""" )
lowercase_ = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(F'''{md_prefix(__lowerCAmelCase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md(".")
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Try to reach the bottom-right cell of the maze from the top-left one."""
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first backtracking step: mark (i, j) as visited, then try all four
    neighbouring cells, undoing the mark if none of them leads to the goal."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark the current cell as visited
            solutions[i][j] = 1

            # check all four directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
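
# Minimal usage sketch (assumed example grid, not part of the original module):
# 0 marks a free cell and 1 a blocked one; the solver prints the visited-path
# grid and returns True when the bottom-right corner is reachable.
if __name__ == "__main__":
    _example_maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 0, 0],
    ]
    assert solve_maze(_example_maze) is True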
"""simple docstring"""
def bead_sort(sequence: list) -> list:
    """Bead sort only works for sequences of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
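# Additional self-check (assumed example, not in the original file): duplicates
# are preserved, since beads only redistribute between adjacent rods.
assert bead_sort([5, 3, 1, 7, 4, 1, 1]) == [1, 1, 1, 3, 4, 5, 7]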
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
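
# Minimal usage sketch (hypothetical session, not part of the original module;
# requires a running Spark session):
#
#     from pyspark.sql import SparkSession
#     spark = SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#     dataset = SparkDatasetReader(df, streaming=False).read()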
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Tuple = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False)
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxBlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""")
@slow
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 1_5, """max_length""": 2_5}
lowercase_ = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
lowercase_ = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=lowerCAmelCase_)
lowercase_ = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""")
lowercase_ = ["""Sam"""]
lowercase_ = tokenizer(lowerCAmelCase_ , return_tensors="""jax""")
lowercase_ = model.generate(**lowerCAmelCase_ , **lowerCAmelCase_)
lowercase_ = """Sam is a great name. It means \"sun\" in Gaelic."""
lowercase_ = tokenizer.batch_decode(lowerCAmelCase_ , **lowerCAmelCase_)
assert generated_txt[0].strip() == tgt_text
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
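
# Minimal usage sketch (assumed arrays, not part of the original module):
#
#     import numpy as np
#     preds = np.array([1, 0, 1, 1])
#     labels = np.array([1, 0, 0, 1])
#     glue_compute_metrics("sst-2", preds, labels)  # -> {"acc": 0.75}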
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
UpperCAmelCase : Dict = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
UpperCAmelCase : Union[str, Any] = 10
UpperCAmelCase : Union[str, Any] = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a list of tokens, or None if the list is too short."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the index and attach it to an existing duplicate cluster if one is close enough."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = element
lowercase_ = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[str]:
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(__lowerCAmelCase , max_queue_size=1_00_00 ) , chunksize=1_00 , ):
if data is not None:
yield data
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = DuplicationIndex(duplication_jaccard_threshold=__lowerCAmelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(__lowerCAmelCase ) ) , max_queue_size=1_00 ) ):
di.add(__lowerCAmelCase , __lowerCAmelCase )
    # Returns a List[Cluster] where each Cluster is a List[Dict] holding the base_index, repo_name and path of a duplicated file.
return di.get_duplicate_clusters()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
lowercase_ = get_tokens(__lowerCAmelCase )
lowercase_ = get_tokens(__lowerCAmelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
UpperCAmelCase : Optional[Any] = None
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = []
for elementa in cluster:
lowercase_ = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
lowercase_ = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(__lowerCAmelCase , __lowerCAmelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowercase_ = 1
extremes.append(__lowerCAmelCase )
return extremes
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
global _shared_dataset
lowercase_ = dataset
lowercase_ = []
lowercase_ = partial(_find_cluster_extremes_shared , jaccard_threshold=__lowerCAmelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
__lowerCAmelCase , __lowerCAmelCase , ) , total=len(__lowerCAmelCase ) , ):
extremes_list.append(__lowerCAmelCase )
return extremes_list
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
'''simple docstring'''
lowercase_ = make_duplicate_clusters(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
lowercase_ = {}
lowercase_ = find_extremes(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for extremes in extremes_clusters:
for element in extremes:
lowercase_ = element
lowercase_ = duplicate_indices - set(extreme_dict.keys() )
lowercase_ = dataset.filter(lambda __lowerCAmelCase , __lowerCAmelCase : idx not in remove_indices , with_indices=__lowerCAmelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowercase_ = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
lowercase_ = extreme_dict[element["""base_index"""]]["""copies"""]
print(F'''Original dataset size: {len(__lowerCAmelCase )}''' )
print(F'''Number of duplicate clusters: {len(__lowerCAmelCase )}''' )
print(F'''Files in duplicate cluster: {len(__lowerCAmelCase )}''' )
print(F'''Unique files in duplicate cluster: {len(__lowerCAmelCase )}''' )
print(F'''Filtered dataset size: {len(__lowerCAmelCase )}''' )
return ds_filter, duplicate_clusters
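# Overall deduplication flow implemented above: every file gets a MinHash over its token set,
# MinHashLSH groups files whose estimated Jaccard similarity exceeds the threshold into
# clusters, each cluster keeps a small set of "extreme" representatives, and the remaining
# duplicate indices are filtered out of the dataset.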
| 313
| 0
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = 10**9 ) -> List[Any]:
'''simple docstring'''
lowercase_ = 1
lowercase_ = 2
lowercase_ = 0
lowercase_ = 0
lowercase_ = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
lowercase_ = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F"{solution() = }")
| 360
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCAmelCase : Union[str, Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
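# Each [old, new] pair above is applied in order as a plain substring replacement, turning a
# TF Pegasus variable name (e.g. "ffn.dense_1.") into the matching Hugging Face state-dict
# key (e.g. "fc2.").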
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
lowercase_ = k.replace(__lowerCAmelCase , __lowerCAmelCase )
return k
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> PegasusForConditionalGeneration:
'''simple docstring'''
lowercase_ = DEFAULTS.copy()
cfg_kwargs.update(__lowerCAmelCase )
lowercase_ = PegasusConfig(**__lowerCAmelCase )
lowercase_ = PegasusForConditionalGeneration(__lowerCAmelCase )
lowercase_ = torch_model.model.state_dict()
lowercase_ = {}
for k, v in tf_weights.items():
lowercase_ = rename_state_dict_key(__lowerCAmelCase )
if new_k not in sd:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
lowercase_ = v.T
lowercase_ = torch.tensor(__lowerCAmelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
lowercase_ = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
lowercase_ = mapping["""shared.weight"""]
lowercase_ = mapping["""shared.weight"""]
lowercase_ = {k: torch.zeros_like(__lowerCAmelCase ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**__lowerCAmelCase )
lowercase_ , lowercase_ = torch_model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
lowercase_ = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
lowercase_ = tf.train.list_variables(__lowerCAmelCase )
lowercase_ = {}
lowercase_ = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__lowerCAmelCase , desc="""converting tf checkpoint to dict""" ):
lowercase_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
lowercase_ = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = array
return tf_weights
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = Path(__lowerCAmelCase ).parent.name
lowercase_ = task_specific_params[F'''summarization_{dataset}''']["""max_position_embeddings"""]
lowercase_ = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=__lowerCAmelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(__lowerCAmelCase )
# convert model
lowercase_ = get_tf_weights_as_numpy(__lowerCAmelCase )
lowercase_ = task_specific_params[F'''summarization_{dataset}''']
if dataset == "large":
lowercase_ = task_specific_params
lowercase_ = convert_pegasus(__lowerCAmelCase , __lowerCAmelCase )
torch_model.save_pretrained(__lowerCAmelCase )
lowercase_ = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(__lowerCAmelCase , Path(__lowerCAmelCase ) / """pytorch_model.bin""" )
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase : List[Any] = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase : List[str] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase : int = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 313
| 0
|
from string import ascii_uppercase
UpperCAmelCase : List[str] = {char: i for i, char in enumerate(ascii_uppercase)}
UpperCAmelCase : Optional[int] = dict(enumerate(ascii_uppercase))
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = len(lowerCAmelCase__ )
lowercase_ = 0
while True:
if x == i:
lowercase_ = 0
if len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ):
break
key += key[i]
i += 1
return key
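# The key is simply cycled until it is as long as the message, e.g. "SECRET" stretched against
# "THE GERMAN ATTACK" becomes "SECRETSECRETSECRE".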
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = """"""
lowercase_ = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
lowercase_ = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = """"""
lowercase_ = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
lowercase_ = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def _SCREAMING_SNAKE_CASE () -> None:
'''simple docstring'''
lowercase_ = """THE GERMAN ATTACK"""
lowercase_ = """SECRET"""
lowercase_ = generate_key(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase_ = cipher_text(lowerCAmelCase__ , lowerCAmelCase__ )
print(F'''Encrypted Text = {s}''' )
print(F'''Original Text = {original_text(lowerCAmelCase__ , lowerCAmelCase__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 361
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def _SCREAMING_SNAKE_CASE () -> Generator[int, None, None]:
'''simple docstring'''
lowercase_ = {}
lowercase_ = 2
while True:
lowercase_ = factor_map.pop(__lowerCAmelCase , __lowerCAmelCase )
if factor:
lowercase_ = factor + prime
while x in factor_map:
x += factor
lowercase_ = factor
else:
lowercase_ = prime
yield prime
prime += 1
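# This is an incremental (unbounded) Sieve of Eratosthenes: factor_map remembers, for each
# upcoming composite, one prime that divides it, so every candidate is either crossed off
# cheaply or yielded as a newly found prime; no upper bound has to be fixed in advance.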
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = 1E10 ) -> int:
'''simple docstring'''
lowercase_ = sieve()
lowercase_ = 1
while True:
lowercase_ = next(__lowerCAmelCase )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(__lowerCAmelCase )
n += 2
if __name__ == "__main__":
print(solution())
| 313
| 0
|
"""simple docstring"""
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "philschmid/bart-large-cnn-samsum"
lowercase__ = (
"This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
"and returns a summary of the text."
)
lowercase__ = "summarizer"
lowercase__ = AutoTokenizer
lowercase__ = AutoModelForSeqaSeqLM
lowercase__ = ["text"]
lowercase__ = ["text"]
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Dict):
"""simple docstring"""
return self.pre_processor(lowerCAmelCase_ , return_tensors="""pt""" , truncation=lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
return self.model.generate(**lowerCAmelCase_)[0]
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
return self.pre_processor.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_)
| 362
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , lowerCAmelCase_ : int = 6):
"""simple docstring"""
lowercase_ = None
lowercase_ = None
self.create_linked_list(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = Node()
lowercase_ = current_node
lowercase_ = current_node
lowercase_ = current_node
for _ in range(1 , lowerCAmelCase_):
lowercase_ = Node()
lowercase_ = current_node
lowercase_ = previous_node
lowercase_ = current_node
lowercase_ = self.front
lowercase_ = previous_node
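    # The nodes created above form a fixed-size ring: front and rear start at the same node and
    # advance around the ring as items are enqueued and dequeued, so no node is ever allocated
    # or freed after construction.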
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
self.check_can_perform_operation()
return self.front.data if self.front else None
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Any):
"""simple docstring"""
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
lowercase_ = self.rear.next
if self.rear:
lowercase_ = data
def _UpperCAmelCase ( self : str):
"""simple docstring"""
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
lowercase_ = self.front.data
lowercase_ = None
return data
lowercase_ = self.front
lowercase_ = old_front.next
lowercase_ = old_front.data
lowercase_ = None
return data
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
if self.is_empty():
raise Exception("""Empty Queue""")
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""")
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[str]):
"""simple docstring"""
lowercase_ = None
lowercase_ = None
lowercase_ = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 313
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
UpperCAmelCase : str = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
UpperCAmelCase : str = TaTokenizerFast
UpperCAmelCase : Dict = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Tuple = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
UpperCAmelCase : Dict = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 363
|
"""simple docstring"""
from collections.abc import Sequence
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase = False ) -> float:
'''simple docstring'''
if not arr:
return 0
lowercase_ = 0 if allow_empty_subarrays else float("""-inf""" )
lowercase_ = 0.0
for num in arr:
lowercase_ = max(0 if allow_empty_subarrays else num , curr_sum + num )
lowercase_ = max(__lowerCAmelCase , __lowerCAmelCase )
return max_sum
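# This is Kadane's algorithm: curr_sum is the best sum of a subarray ending at the current
# element and is restarted whenever carrying the previous sum would only hurt, giving an
# O(n) scan overall.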
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase : Union[str, Any] = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
| 313
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( a_ ):
lowercase__ = '''git_vision_model'''
def __init__( self : str , lowerCAmelCase_ : List[str]=7_6_8 , lowerCAmelCase_ : str=3_0_7_2 , lowerCAmelCase_ : List[str]=1_2 , lowerCAmelCase_ : List[Any]=1_2 , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Optional[int]=2_2_4 , lowerCAmelCase_ : Tuple=1_6 , lowerCAmelCase_ : Any="quick_gelu" , lowerCAmelCase_ : List[Any]=1E-5 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : Optional[int]=0.02 , **lowerCAmelCase_ : List[str] , ):
"""simple docstring"""
super().__init__(**lowercase_)
lowercase_ = hidden_size
lowercase_ = intermediate_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = num_channels
lowercase_ = patch_size
lowercase_ = image_size
lowercase_ = initializer_range
lowercase_ = attention_dropout
lowercase_ = layer_norm_eps
lowercase_ = hidden_act
@classmethod
def _UpperCAmelCase ( cls : int , lowerCAmelCase_ : Dict , **lowerCAmelCase_ : List[Any]):
"""simple docstring"""
cls._set_token_in_kwargs(lowercase_)
lowercase_ , lowercase_ = cls.get_config_dict(lowercase_ , **lowercase_)
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""") == "git":
lowercase_ = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(lowercase_ , **lowercase_)
class SCREAMING_SNAKE_CASE__ ( a_ ):
lowercase__ = '''git'''
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Any=3_0_5_2_2 , lowerCAmelCase_ : Tuple=7_6_8 , lowerCAmelCase_ : Optional[Any]=6 , lowerCAmelCase_ : List[str]=1_2 , lowerCAmelCase_ : Dict=3_0_7_2 , lowerCAmelCase_ : str="gelu" , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Tuple=1_0_2_4 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : List[str]=1E-12 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : int="absolute" , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Any=1_0_1 , lowerCAmelCase_ : int=1_0_2 , lowerCAmelCase_ : Any=None , **lowerCAmelCase_ : str , ):
"""simple docstring"""
super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , pad_token_id=lowercase_ , **lowercase_)
if vision_config is None:
lowercase_ = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""")
lowercase_ = GitVisionConfig(**lowercase_)
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = hidden_act
lowercase_ = intermediate_size
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = position_embedding_type
lowercase_ = use_cache
lowercase_ = tie_word_embeddings
lowercase_ = num_image_with_embedding
lowercase_ = bos_token_id
lowercase_ = eos_token_id
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = copy.deepcopy(self.__dict__)
lowercase_ = self.vision_config.to_dict()
lowercase_ = self.__class__.model_type
return output
| 364
|
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
UpperCAmelCase : Optional[int] = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None):
"""simple docstring"""
lowercase_ = self.layer[current_layer](lowerCAmelCase_ , lowerCAmelCase_ , head_mask[current_layer])
lowercase_ = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , __UpperCAmelCase , )
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : Any , lowerCAmelCase_ : Dict):
"""simple docstring"""
super().__init__(lowerCAmelCase_)
lowercase_ = BertEncoderWithPabee(lowerCAmelCase_)
self.init_weights()
lowercase_ = 0
lowercase_ = 0
lowercase_ = 0
lowercase_ = 0
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = threshold
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = patience
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = 0
lowercase_ = 0
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.inference_layers_num / self.inference_instances_num
lowercase_ = (
F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(lowerCAmelCase_)
@add_start_docstrings_to_model_forward(lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""")
elif input_ids is not None:
lowercase_ = input_ids.size()
elif inputs_embeds is not None:
lowercase_ = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""")
lowercase_ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
if token_type_ids is None:
lowercase_ = torch.zeros(lowerCAmelCase_ , dtype=torch.long , device=lowerCAmelCase_)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowercase_ = self.get_extended_attention_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowercase_ , lowercase_ , lowercase_ = encoder_hidden_states.size()
lowercase_ = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
lowercase_ = self.invert_attention_mask(lowerCAmelCase_)
else:
lowercase_ = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowercase_ = self.get_head_mask(lowerCAmelCase_ , self.config.num_hidden_layers)
lowercase_ = self.embeddings(
input_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_)
lowercase_ = embedding_output
if self.training:
lowercase_ = []
for i in range(self.config.num_hidden_layers):
lowercase_ = self.encoder.adaptive_forward(
lowerCAmelCase_ , current_layer=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_)
lowercase_ = self.pooler(lowerCAmelCase_)
lowercase_ = output_layers[i](output_dropout(lowerCAmelCase_))
res.append(lowerCAmelCase_)
elif self.patience == 0: # Use all layers for inference
lowercase_ = self.encoder(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
lowercase_ = self.pooler(encoder_outputs[0])
lowercase_ = [output_layers[self.config.num_hidden_layers - 1](lowerCAmelCase_)]
else:
lowercase_ = 0
lowercase_ = None
lowercase_ = 0
for i in range(self.config.num_hidden_layers):
calculated_layer_num += 1
lowercase_ = self.encoder.adaptive_forward(
lowerCAmelCase_ , current_layer=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_)
lowercase_ = self.pooler(lowerCAmelCase_)
lowercase_ = output_layers[i](lowerCAmelCase_)
if regression:
lowercase_ = logits.detach()
if patient_result is not None:
lowercase_ = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
patient_counter += 1
else:
lowercase_ = 0
else:
lowercase_ = logits.detach().argmax(dim=1)
if patient_result is not None:
lowercase_ = patient_result.detach().argmax(dim=1)
if (patient_result is not None) and torch.all(labels.eq(lowerCAmelCase_)):
patient_counter += 1
else:
lowercase_ = 0
lowercase_ = logits
if patient_counter == self.patience:
break
lowercase_ = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
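# Patience-based early exit (PABEE): at inference time an internal classifier is evaluated
# after every transformer layer, and as soon as `patience` consecutive layers agree on the
# prediction (or stay within the regression threshold) the remaining layers are skipped.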
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , __UpperCAmelCase , )
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , lowerCAmelCase_ : str):
"""simple docstring"""
super().__init__(lowerCAmelCase_)
lowercase_ = config.num_labels
lowercase_ = BertModelWithPabee(lowerCAmelCase_)
lowercase_ = nn.Dropout(config.hidden_dropout_prob)
lowercase_ = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels) for _ in range(config.num_hidden_layers)])
self.init_weights()
@add_start_docstrings_to_model_forward(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : List[str]=None , ):
"""simple docstring"""
lowercase_ = self.bert(
input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowercase_ = (logits[-1],)
if labels is not None:
lowercase_ = None
lowercase_ = 0
for ix, logits_item in enumerate(lowerCAmelCase_):
if self.num_labels == 1:
# We are doing regression
lowercase_ = MSELoss()
lowercase_ = loss_fct(logits_item.view(-1) , labels.view(-1))
else:
lowercase_ = CrossEntropyLoss()
lowercase_ = loss_fct(logits_item.view(-1 , self.num_labels) , labels.view(-1))
if total_loss is None:
lowercase_ = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowercase_ = (total_loss / total_weights,) + outputs
return outputs
| 313
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> dict[str, float]:
'''simple docstring'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Any:
'''simple docstring'''
if (
        (cp >= 0x4e00 and cp <= 0x9fff)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4dbf)  # CJK Unified Ideographs Extension A
        or (cp >= 0x20000 and cp <= 0x2a6df)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2a700 and cp <= 0x2b73f)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2b740 and cp <= 0x2b81f)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2b820 and cp <= 0x2ceaf)  # CJK Unified Ideographs Extension E
        or (cp >= 0xf900 and cp <= 0xfaff)  # CJK Compatibility Ideographs
        or (cp >= 0x2f800 and cp <= 0x2fa1f)  # CJK Compatibility Ideographs Supplement
    ):  # any code point inside the CJK blocks above
return True
return False
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[str]:
'''simple docstring'''
for char in word:
lowercase_ = ord(__lowerCAmelCase )
if not _is_chinese_char(__lowerCAmelCase ):
return 0
return 1
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = set()
for token in tokens:
lowercase_ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase )
if chinese_word:
word_set.add(__lowerCAmelCase )
lowercase_ = list(__lowerCAmelCase )
return word_list
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
lowercase_ = max([len(__lowerCAmelCase ) for w in chinese_word_set] )
lowercase_ = bert_tokens
lowercase_ , lowercase_ = 0, len(__lowerCAmelCase )
while start < end:
lowercase_ = True
if is_chinese(bert_word[start] ):
lowercase_ = min(end - start , __lowerCAmelCase )
for i in range(__lowerCAmelCase , 1 , -1 ):
lowercase_ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowercase_ = """##""" + bert_word[j]
lowercase_ = start + i
lowercase_ = False
break
if single_word:
start += 1
return bert_word
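# add_sub_symbol rewrites the BERT pieces so that every Chinese character which is not the
# first character of an LTP-segmented word gets a "##" prefix; the positions of those "##"
# pieces become the whole-word-masking reference ids saved below.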
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = []
for i in range(0 , len(__lowerCAmelCase ) , 1_00 ):
lowercase_ = ltp_tokenizer.seg(lines[i : i + 1_00] )[0]
lowercase_ = [get_chinese_word(__lowerCAmelCase ) for r in res]
ltp_res.extend(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowercase_ = []
for i in range(0 , len(__lowerCAmelCase ) , 1_00 ):
lowercase_ = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=5_12 )
bert_res.extend(res["""input_ids"""] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowercase_ = []
for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ):
lowercase_ = []
for id in input_ids:
lowercase_ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase )
input_tokens.append(__lowerCAmelCase )
lowercase_ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = []
        # We only save the positions of Chinese subwords that start with "##", i.e. pieces that are part of a whole word.
for i, token in enumerate(__lowerCAmelCase ):
if token[:2] == "##":
lowercase_ = token[2:]
# save chinese tokens' pos
if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ):
ref_id.append(__lowerCAmelCase )
ref_ids.append(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
return ref_ids
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
lowercase_ = f.readlines()
lowercase_ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowercase_ = LTP(args.ltp ) # faster in GPU device
lowercase_ = BertTokenizer.from_pretrained(args.bert )
lowercase_ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
lowercase_ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids]
f.writelines(__lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
UpperCAmelCase : int = parser.parse_args()
main(args)
| 313
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
UpperCAmelCase : Any = TypeVar("T")
UpperCAmelCase : Optional[Any] = TypeVar("U")
class SCREAMING_SNAKE_CASE__ ( Generic[T, U] ):
def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = key
lowercase_ = val
lowercase_ = None
lowercase_ = None
def __repr__( self : Optional[int]):
"""simple docstring"""
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next)}, has prev: {bool(self.prev)}'''
)
class SCREAMING_SNAKE_CASE__ ( Generic[T, U] ):
def __init__( self : Optional[Any]):
"""simple docstring"""
lowercase_ = DoubleLinkedListNode(__UpperCAmelCase , __UpperCAmelCase)
lowercase_ = DoubleLinkedListNode(__UpperCAmelCase , __UpperCAmelCase)
lowercase_ = self.rear, self.head
def __repr__( self : Dict):
"""simple docstring"""
lowercase_ = ["""DoubleLinkedList"""]
lowercase_ = self.head
while node.next is not None:
rep.append(str(__UpperCAmelCase))
lowercase_ = node.next
rep.append(str(self.rear))
return ",\n ".join(__UpperCAmelCase)
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Tuple):
"""simple docstring"""
lowercase_ = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
lowercase_ = node
lowercase_ = previous
lowercase_ = node
lowercase_ = self.rear
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
if node.prev is None or node.next is None:
return None
lowercase_ = node.next
lowercase_ = node.prev
lowercase_ = None
lowercase_ = None
return node
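# The doubly linked list above is used by the LRU cache below to keep entries in recency
# order: head and rear are permanent sentinels, the least recently used entry sits right
# after head, and every cache hit re-appends the touched node just before rear.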
class SCREAMING_SNAKE_CASE__ ( Generic[T, U] ):
lowercase__ = {}
def __init__( self : str , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = DoubleLinkedList()
lowercase_ = capacity
lowercase_ = 0
lowercase_ = 0
lowercase_ = 0
lowercase_ = {}
def __repr__( self : Dict):
"""simple docstring"""
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self : List[str] , lowerCAmelCase_ : Tuple):
"""simple docstring"""
return key in self.cache
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : str):
"""simple docstring"""
if key in self.cache:
self.hits += 1
lowercase_ = self.cache[key]
lowercase_ = self.list.remove(self.cache[key])
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(__UpperCAmelCase)
return node.val
self.miss += 1
return None
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict):
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
lowercase_ = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(__UpperCAmelCase) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
lowercase_ = DoubleLinkedListNode(__UpperCAmelCase , __UpperCAmelCase)
self.list.add(self.cache[key])
self.num_keys += 1
else:
# bump node to the end of the list, update value
lowercase_ = self.list.remove(self.cache[key])
assert node is not None # node guaranteed to be in list
lowercase_ = value
self.list.add(__UpperCAmelCase)
@classmethod
def _UpperCAmelCase ( cls : Tuple , lowerCAmelCase_ : int = 1_2_8):
"""simple docstring"""
def cache_decorator_inner(lowerCAmelCase_ : List[str]) -> Callable[..., U]:
def cache_decorator_wrapper(*lowerCAmelCase_ : List[Any]) -> U:
if func not in cls.decorator_function_to_instance_map:
lowercase_ = LRUCache(__UpperCAmelCase)
lowercase_ = cls.decorator_function_to_instance_map[func].get(args[0])
if result is None:
lowercase_ = func(*__UpperCAmelCase)
cls.decorator_function_to_instance_map[func].put(args[0] , __UpperCAmelCase)
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(__UpperCAmelCase , """cache_info""" , __UpperCAmelCase) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366
|
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
lowercase_ = int(__lowerCAmelCase )
lowercase_ = int(__lowerCAmelCase )
lowercase_ = []
for temp in range(int(__lowerCAmelCase ) ):
series.append(F'''1 / {pow(temp + 1 , int(__lowerCAmelCase ) )}''' if series else """1""" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase : List[str] = int(input("Enter the last number (nth term) of the P-Series"))
UpperCAmelCase : Tuple = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 313
| 0
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Any:
'''simple docstring'''
def get_matched_characters(__lowerCAmelCase , __lowerCAmelCase ) -> str:
lowercase_ = []
lowercase_ = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
lowercase_ = int(max(0 , i - limit ) )
lowercase_ = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(a_ )
lowercase_ = F'''{_stra[0:_stra.index(a_ )]} {_stra[_stra.index(a_ ) + 1:]}'''
return "".join(a_ )
# matching characters
lowercase_ = get_matched_characters(a_ , a_ )
lowercase_ = get_matched_characters(a_ , a_ )
lowercase_ = len(a_ )
# transposition
lowercase_ = (
len([(ca, ca) for ca, ca in zip(a_ , a_ ) if ca != ca] ) // 2
)
if not match_count:
lowercase_ = 0.0
else:
lowercase_ = (
1
/ 3
* (
match_count / len(a_ )
+ match_count / len(a_ )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
lowercase_ = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
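# The Winkler adjustment above boosts the plain Jaro score by 0.1 per character of common
# prefix (capped at 4 characters); with no common prefix, as in the demo call below, the
# result is just the Jaro similarity.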
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 367
|
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
self.assertEqual(x.component(0) , 1)
self.assertEqual(x.component(2) , 3)
lowercase_ = Vector()
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = Vector([0, 0, 0, 0, 0, 1])
self.assertEqual(str(lowerCAmelCase_) , """(0,0,0,0,0,1)""")
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3, 4])
self.assertEqual(len(lowerCAmelCase_) , 4)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = Vector([1, 2])
lowercase_ = Vector([1, 2, 3, 4, 5])
lowercase_ = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
lowercase_ = Vector([1, -1, 1, -1, 2, -3, 4, -5])
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3)
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3)
self.assertEqual(z.euclidean_length() , 0)
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 1, 1])
self.assertEqual((x + y).component(0) , 2)
self.assertEqual((x + y).component(1) , 3)
self.assertEqual((x + y).component(2) , 4)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 1, 1])
self.assertEqual((x - y).component(0) , 0)
self.assertEqual((x - y).component(1) , 1)
self.assertEqual((x - y).component(2) , 2)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([2, -1, 4]) # for test of dot product
lowercase_ = Vector([1, -2, -1])
self.assertEqual(str(x * 3.0) , """(3.0,6.0,9.0)""")
self.assertEqual((a * b) , 0)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
self.assertEqual(str(zero_vector(1_0)).count("""0""") , 1_0)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1)) , """(0,1,0)""")
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 0, 1])
self.assertEqual(str(axpy(2 , lowerCAmelCase_ , lowerCAmelCase_)) , """(3,4,7)""")
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 0, 0, 0, 0, 0])
lowercase_ = x.copy()
self.assertEqual(str(lowerCAmelCase_) , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Vector([1, 0, 0])
x.change_component(0 , 0)
x.change_component(1 , 1)
self.assertEqual(str(lowerCAmelCase_) , """(0,1,0)""")
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = [[-3, -1_4, -1_0], [-5, -1_0, -5], [-2, -1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(minors[x][y] , a.minor(lowerCAmelCase_ , lowerCAmelCase_))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = [[-3, 1_4, -1_0], [5, -1_0, 5], [-2, 1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(cofactors[x][y] , a.cofactor(lowerCAmelCase_ , lowerCAmelCase_))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual(-5 , a.determinant())
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3)
lowercase_ = Vector([1, 2, 3])
self.assertEqual("""(14,32,50)""" , str(a * x))
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
a.change_component(0 , 2 , 5)
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual(7 , a.component(2 , 1) , 0.01)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3)
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b))
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3)
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b))
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5)) , )
if __name__ == "__main__":
unittest.main()
| 313
| 0
|
"""simple docstring"""
from collections import defaultdict
from math import gcd
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = 1_50_00_00 ) -> int:
'''simple docstring'''
lowercase_ = defaultdict(_UpperCamelCase )
lowercase_ = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , _UpperCamelCase , 2 ):
if gcd(_UpperCamelCase , _UpperCamelCase ) > 1:
continue
lowercase_ = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(_UpperCamelCase , limit + 1 , _UpperCamelCase ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
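# Euclid's formula: for coprime m > n of opposite parity, (m^2 - n^2, 2mn, m^2 + n^2) is a
# primitive Pythagorean triple with perimeter 2m(m + n); the loop counts every multiple of
# each primitive perimeter and the answer is how many perimeters are hit exactly once.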
if __name__ == "__main__":
print(F"{solution() = }")
| 368
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = 0
if start < end:
lowercase_ = randint(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = a[end]
lowercase_ = a[pivot]
lowercase_ = temp
lowercase_ , lowercase_ = _in_place_partition(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
count += _in_place_quick_sort(__lowerCAmelCase , __lowerCAmelCase , p - 1 )
count += _in_place_quick_sort(__lowerCAmelCase , p + 1 , __lowerCAmelCase )
return count
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = 0
lowercase_ = randint(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = a[end]
lowercase_ = a[pivot]
lowercase_ = temp
lowercase_ = start - 1
for index in range(__lowerCAmelCase , __lowerCAmelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowercase_ = new_pivot_index + 1
lowercase_ = a[new_pivot_index]
lowercase_ = a[index]
lowercase_ = temp
lowercase_ = a[new_pivot_index + 1]
lowercase_ = a[end]
lowercase_ = temp
return new_pivot_index + 1, count
UpperCAmelCase : Union[str, Any] = TemporaryFile()
UpperCAmelCase : Optional[int] = 100 # 100 elements are to be sorted
UpperCAmelCase , UpperCAmelCase : List[str] = 0, 1 # mean and standard deviation
UpperCAmelCase : Optional[Any] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
UpperCAmelCase : List[str] = np.load(outfile)
UpperCAmelCase : List[Any] = len(M) - 1
UpperCAmelCase : Optional[int] = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 313
| 0
|
"""simple docstring"""
from functools import reduce
UpperCAmelCase : List[Any] = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = N ) -> Union[str, Any]:
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda __lowerCAmelCase , __lowerCAmelCase : str(int(a__ ) * int(a__ ) ) , n[i : i + 13] ) )
for i in range(len(a__ ) - 12 ) )
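# This is the classic "largest product in a series" task: find the 13 adjacent digits in the
# 1000-digit number above with the greatest product; reduce() multiplies each 13-character
# window digit by digit and max() keeps the best window.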
if __name__ == "__main__":
print(F"{solution() = }")
| 369
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=False ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ) -> Any:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowercase_ = """"""
else:
lowercase_ = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' )
lowercase_ = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase_ = in_proj_weight[
: config.hidden_size, :
]
lowercase_ = in_proj_bias[: config.hidden_size]
lowercase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ = in_proj_bias[-config.hidden_size :]
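# The original MSN checkpoints store query/key/value as one fused qkv projection per block;
# the slices above split that matrix (and its bias) into the three separate projections that
# the Hugging Face ViT attention layers expect.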
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = dct.pop(__lowerCAmelCase )
lowercase_ = val
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = ViTMSNConfig()
lowercase_ = 10_00
lowercase_ = """datasets/huggingface/label-files"""
lowercase_ = """imagenet-1k-id2label.json"""
lowercase_ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase ) , """r""" ) )
lowercase_ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
lowercase_ = idalabel
lowercase_ = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowercase_ = 3_84
lowercase_ = 15_36
lowercase_ = 6
elif "l16" in checkpoint_url:
lowercase_ = 10_24
lowercase_ = 40_96
lowercase_ = 24
lowercase_ = 16
lowercase_ = 0.1
elif "b4" in checkpoint_url:
lowercase_ = 4
elif "l7" in checkpoint_url:
lowercase_ = 7
lowercase_ = 10_24
lowercase_ = 40_96
lowercase_ = 24
lowercase_ = 16
lowercase_ = 0.1
lowercase_ = ViTMSNModel(__lowerCAmelCase )
lowercase_ = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="""cpu""" )["""target_encoder"""]
lowercase_ = ViTImageProcessor(size=config.image_size )
remove_projection_head(__lowerCAmelCase )
lowercase_ = create_rename_keys(__lowerCAmelCase , base_model=__lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , base_model=__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
lowercase_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase_ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
lowercase_ = ViTImageProcessor(
size=config.image_size , image_mean=__lowerCAmelCase , image_std=__lowerCAmelCase )
lowercase_ = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
lowercase_ = model(**__lowerCAmelCase )
lowercase_ = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase_ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
lowercase_ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
lowercase_ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
lowercase_ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
lowercase_ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __lowerCAmelCase , atol=1E-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 313
| 0
|
"""simple docstring"""
def counting_sort(collection ) -> list:
    '''simple docstring'''
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string(string ) -> str:
    '''simple docstring'''
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 370
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
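# Configuration for Perceiver models: latent array sizes, attention layout, and modality-specific settings.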
class PerceiverConfig(PretrainedConfig ):
    model_type = "perceiver"
    def __init__( self , num_latents=2_5_6 , d_latents=1_2_8_0 , d_model=7_6_8 , num_blocks=1 , num_self_attends_per_block=2_6 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_query_residual=True , vocab_size=2_6_2 , max_position_embeddings=2_0_4_8 , image_size=5_6 , train_size=[3_6_8, 4_9_6] , num_frames=1_6 , audio_samples_per_frame=1_9_2_0 , samples_per_patch=1_6 , output_shape=[1, 1_6, 2_2_4, 2_2_4] , **kwargs , ):
"""simple docstring"""
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
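# ONNX export configuration: declares the dynamic axes and builds dummy text or image inputs for tracing.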
class PerceiverOnnxConfig(OnnxConfig ):
@property
    def inputs( self ):
"""simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
])
@property
    def atol_for_validation( self ):
"""simple docstring"""
return 1E-4
    def generate_dummy_inputs( self , preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size: int = -1 , seq_length: int = -1 , num_choices: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , num_channels: int = 3 , image_width: int = 4_0 , image_height: int = 4_0 , ):
        """simple docstring"""
        if isinstance(preprocessor , PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add)
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [""" """.join(["""a"""]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework))
            inputs["""inputs"""] = inputs.pop("""input_ids""")
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width)
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework))
            inputs["""inputs"""] = inputs.pop("""pixel_values""")
            return inputs
        else:
            raise ValueError(
                """Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""")
| 313
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Tuple):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""]):
lowercase_ = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(lowercase_)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = """sshleifer/tiny-gpt2"""
lowercase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowercase_ = PyTorchBenchmark(lowercase_)
lowercase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = """sgugger/tiny-distilbert-classification"""
lowercase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , only_pretrain_model=lowercase_ , )
lowercase_ = PyTorchBenchmark(lowercase_)
lowercase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = """sshleifer/tiny-gpt2"""
lowercase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , torchscript=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowercase_ = PyTorchBenchmark(lowercase_)
lowercase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""")
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = """sshleifer/tiny-gpt2"""
lowercase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , fpaa=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowercase_ = PyTorchBenchmark(lowercase_)
lowercase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = """sshleifer/tiny-gpt2"""
lowercase_ = AutoConfig.from_pretrained(lowercase_)
# set architectures equal to `None`
lowercase_ = None
lowercase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowercase_ = PyTorchBenchmark(lowercase_ , configs=[config])
lowercase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = """sshleifer/tiny-gpt2"""
lowercase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowercase_ = PyTorchBenchmark(lowercase_)
lowercase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""")
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = """sshleifer/tiny-gpt2"""
lowercase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowercase_ , multi_process=lowercase_ , )
lowercase_ = PyTorchBenchmark(lowercase_)
lowercase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = """sshleifer/tiny-gpt2"""
lowercase_ = AutoConfig.from_pretrained(lowercase_)
lowercase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowercase_ = PyTorchBenchmark(lowercase_ , configs=[config])
lowercase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = """sshleifer/tinier_bart"""
lowercase_ = AutoConfig.from_pretrained(lowercase_)
lowercase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowercase_ = PyTorchBenchmark(lowercase_ , configs=[config])
lowercase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = """sshleifer/tiny-gpt2"""
lowercase_ = AutoConfig.from_pretrained(lowercase_)
lowercase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowercase_ = PyTorchBenchmark(lowercase_ , configs=[config])
lowercase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = """sshleifer/tinier_bart"""
lowercase_ = AutoConfig.from_pretrained(lowercase_)
lowercase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowercase_ = PyTorchBenchmark(lowercase_ , configs=[config])
lowercase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , save_to_csv=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowercase_ , """inf_time.csv""") , train_memory_csv_file=os.path.join(lowercase_ , """train_mem.csv""") , inference_memory_csv_file=os.path.join(lowercase_ , """inf_mem.csv""") , train_time_csv_file=os.path.join(lowercase_ , """train_time.csv""") , env_info_csv_file=os.path.join(lowercase_ , """env.csv""") , multi_process=lowercase_ , )
lowercase_ = PyTorchBenchmark(lowercase_)
benchmark.run()
self.assertTrue(Path(os.path.join(lowercase_ , """inf_time.csv""")).exists())
self.assertTrue(Path(os.path.join(lowercase_ , """train_time.csv""")).exists())
self.assertTrue(Path(os.path.join(lowercase_ , """inf_mem.csv""")).exists())
self.assertTrue(Path(os.path.join(lowercase_ , """train_mem.csv""")).exists())
self.assertTrue(Path(os.path.join(lowercase_ , """env.csv""")).exists())
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(lowerCAmelCase_ : Union[str, Any]):
self.assertTrue(hasattr(lowercase_ , """sequential"""))
self.assertTrue(hasattr(lowercase_ , """cumulative"""))
self.assertTrue(hasattr(lowercase_ , """current"""))
self.assertTrue(hasattr(lowercase_ , """total"""))
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase_ , """log.txt""") , log_print=lowercase_ , trace_memory_line_by_line=lowercase_ , multi_process=lowercase_ , )
lowercase_ = PyTorchBenchmark(lowercase_)
lowercase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(lowercase_ , """log.txt""")).exists())
| 371
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False)
        self.tokenizer = tokenizer
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = """<pad>"""
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_) , lowerCAmelCase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_) , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , """<s>""")
        self.assertEqual(vocab_keys[1] , """<pad>""")
        self.assertEqual(vocab_keys[-1] , """<mask>""")
        self.assertEqual(len(vocab_keys) , 1_0_1_1_2_2)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2)
@require_torch
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowercase_ = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
lowercase_ = self.tokenizer(
lowerCAmelCase_ , max_length=len(lowerCAmelCase_) , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="""pt""")
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
self.assertEqual((2, 6) , batch.input_ids.shape)
self.assertEqual((2, 6) , batch.attention_mask.shape)
lowercase_ = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens , rust_tokens)
        ids = tokenizer.encode(sequence , add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
        self.assertListEqual(ids , rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids , rust_ids)
@slow
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
lowercase_ = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=lowerCAmelCase_ , )
| 313
| 0
|
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCAmelCase : Optional[Any] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
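# A pipe-delimited tabulate format that renders cleanly inside Slack code blocks.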
UpperCAmelCase : str = []
UpperCAmelCase : Any = []
UpperCAmelCase : Optional[Any] = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
UpperCAmelCase : Dict = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
"emoji": True,
},
}
]
UpperCAmelCase : int = 0
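# Each *.log file holds one JSON record per line with "nodeid", "duration" and "outcome" fields.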
for log in Path().glob("*.log"):
UpperCAmelCase : List[Any] = 0
with open(log, "r") as f:
for line in f:
UpperCAmelCase : Any = json.loads(line)
if line.get("nodeid", "") != "":
UpperCAmelCase : str = line["nodeid"]
if line.get("duration", None) is not None:
UpperCAmelCase : Optional[Any] = F"{line['duration']:.4f}"
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCAmelCase : Union[str, Any] = []
log.unlink()
UpperCAmelCase : Dict = ""
UpperCAmelCase : Dict = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : str = {}
for test in failed_tests:
UpperCAmelCase : Optional[int] = test[0].split("::")
UpperCAmelCase : List[Any] = data[0].split("/")[-1]
if data[0] not in filesafailed:
UpperCAmelCase : List[Any] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCAmelCase : str = [test[0] for test in failed_table]
UpperCAmelCase : Optional[int] = list(set(files))
# Count number of instances in failed_tests
UpperCAmelCase : Tuple = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCAmelCase : str = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
UpperCAmelCase : List[Any] = "Too many failed tests, please see the full report in the Action results."
UpperCAmelCase : Any = len(err) + 10
UpperCAmelCase : List[Any] = message[: 3000 - offset] + F"\n...\n```\n{err}"
print(F"### {message}")
else:
UpperCAmelCase : Optional[int] = "No failed tests! 🤗"
print(F"## {message}")
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
UpperCAmelCase : str = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
UpperCAmelCase : str = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
UpperCAmelCase : Optional[Any] = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
UpperCAmelCase : Union[str, Any] = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
}
],
}
payload.append(date_report)
UpperCAmelCase : Optional[Any] = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
UpperCAmelCase : List[str] = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCAmelCase : Any = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCAmelCase : Optional[Any] = row[0]
else:
UpperCAmelCase : List[Any] = ""
UpperCAmelCase : Union[str, Any] = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
| 350
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Optional[Any] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size)
lowercase_ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1)
lowercase_ = np.concatenate([input_ids, eos_tensor] , axis=1)
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowercase_ = prepare_pegasus_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
return config, inputs_dict
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""")
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
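# Model-level tests: config sanity checks, cached-decoding equivalence, and jit/no-jit parity for encode/decode.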
@require_flax
class FlaxPegasusModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = FlaxPegasusModelTester(self)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = model_class(lowerCAmelCase_)
@jax.jit
def encode_jitted(lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int]=None , **lowerCAmelCase_ : Optional[int]):
return model.encode(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
with self.subTest("""JIT Enabled"""):
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""])
lowercase_ = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict):
return model.decode(
decoder_input_ids=lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , encoder_outputs=lowerCAmelCase_ , )
with self.subTest("""JIT Enabled"""):
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase_ = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowerCAmelCase_)
lowercase_ = np.ones((1, 1))
lowercase_ = model(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
@slow
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""")
lowercase_ = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""")
lowercase_ = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
lowercase_ = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
lowercase_ = tokenizer(lowerCAmelCase_ , return_tensors="""np""" , truncation=lowerCAmelCase_ , max_length=5_1_2 , padding=lowerCAmelCase_)
lowercase_ = model.generate(**lowerCAmelCase_ , num_beams=2).sequences
lowercase_ = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_)
assert tgt_text == decoded
| 313
| 0
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCAmelCase : Any = logging.get_logger("transformers.models.speecht5")
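# Copy the weight-normalised generator parameters from the original checkpoint into the HF model.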
def load_weights(checkpoint , hf_model , config ):
    # NOTE: target attribute names assume the SpeechT5HifiGan layout (conv_pre / upsampler / resblocks / conv_post).
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[F'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[F'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[F'''upsamples.{i}.1.bias''']
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    # the model stores the normalization statistics as `mean`/`scale` buffers
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 351
|
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = None
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = True
lowercase__ = None
lowercase__ = 1
lowercase__ = None
lowercase__ = False
lowercase__ = None
lowercase__ = None
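    # Return a fresh instance carrying deep copies of all fields.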
def _UpperCAmelCase ( self : int):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(lowerCAmelCase_) for k, v in self.__dict__.items()})
| 313
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_flip_channel_order=True , ):
        """simple docstring"""
        size = size if size is not None else {'''shortest_edge''': 2_0}
        crop_size = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = MobileViTImageProcessingTester(self)
@property
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , """do_resize"""))
        self.assertTrue(hasattr(image_processing , """size"""))
        self.assertTrue(hasattr(image_processing , """do_center_crop"""))
        self.assertTrue(hasattr(image_processing , """center_crop"""))
        self.assertTrue(hasattr(image_processing , """do_flip_channel_order"""))
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""shortest_edge""": 2_0})
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8})
lowercase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4)
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2})
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4})
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
pass
def _UpperCAmelCase ( self : int):
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
| 352
|
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase : Any = logging.get_logger(__name__)
UpperCAmelCase : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase : List[Any] = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
UpperCAmelCase : Union[str, Any] = {
"allenai/led-base-16384": 1_6384,
}
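# Fast (Rust-backed) LED tokenizer: byte-level BPE plus padding support for "global_attention_mask".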
class LEDTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type"""))
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""])
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""])
            changes_to_apply = False
            if state.get("""add_prefix_space""" , add_prefix_space) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""" , trim_offsets) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("""type"""))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer , tokenizer_component , new_value)
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token( self ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""")
return None
return str(self._mask_token)
@mask_token.setter
    def mask_token( self , value ):
        """simple docstring"""
        value = AddedToken(value , lstrip=True , rstrip=False) if isinstance(value , str) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs):
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""" , False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                """to use it with pretokenized inputs.""")
        return super()._batch_encode_plus(*args , **kwargs)
    def _encode_plus( self , *args , **kwargs):
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""" , False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                """to use it with pretokenized inputs.""")
        return super()._encode_plus(*args , **kwargs)
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , ):
"""simple docstring"""
lowercase_ = super()._pad(
encoded_inputs=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding_strategy=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
# Load from model defaults
if return_attention_mask is None:
lowercase_ = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase_ = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowercase_ = len(encoded_inputs["""global_attention_mask"""]) != len(lowerCAmelCase_)
if needs_to_be_padded:
lowercase_ = len(lowerCAmelCase_) - len(encoded_inputs["""global_attention_mask"""])
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase_ = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
lowercase_ = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side))
return encoded_inputs
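# --- Added illustrative sketch (assumption, not a transformers API) ----------------
# Mirrors the `-1` padding convention used for `global_attention_mask` in `_pad`
# above: `-1` marks padded positions, while real positions keep their 0/1 values.
def pad_global_attention_mask(mask, target_length, padding_side="right"):
    """Pad a global attention mask to `target_length` with -1 (hypothetical helper)."""
    difference = target_length - len(mask)
    if difference <= 0:
        return list(mask)
    if padding_side == "right":
        return list(mask) + [-1] * difference
    return [-1] * difference + list(mask)
# Example: pad_global_attention_mask([1, 0, 0], 5) -> [1, 0, 0, -1, -1]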
| 313
| 0
|
"""simple docstring"""
import sys
import turtle
def get_mid(point_a, point_b):
    '''Return the midpoint of two points.'''
    return (point_a[0] + point_b[0]) / 2, (point_a[1] + point_b[1]) / 2
def triangle(vertex_a, vertex_b, vertex_c, depth):
    '''Recursively outline the Sierpinski triangle with the given corner vertices.'''
    my_pen.up()
    my_pen.goto(vertex_a[0], vertex_a[1])
    my_pen.down()
    my_pen.goto(vertex_b[0], vertex_b[1])
    my_pen.goto(vertex_c[0], vertex_c[1])
    my_pen.goto(vertex_a[0], vertex_a[1])
    if depth == 0:
        return
    triangle(vertex_a, get_mid(vertex_a, vertex_b), get_mid(vertex_a, vertex_c), depth - 1)
    triangle(vertex_b, get_mid(vertex_a, vertex_b), get_mid(vertex_b, vertex_c), depth - 1)
    triangle(vertex_c, get_mid(vertex_c, vertex_b), get_mid(vertex_a, vertex_c), depth - 1)
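# Added note: each recursion level triples the number of sub-triangles, so a call
# with depth d outlines 3**d smallest triangles.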
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 353
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
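# Minimal usage sketch (added; assumes a transformers build that ships UperNet):
# from transformers import UperNetConfig, UperNetForSemanticSegmentation
# model = UperNetForSemanticSegmentation(UperNetConfig())  # segmentation head over a backbone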
| 313
| 0
|
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    '''Return the largest prime factor of n (Project Euler problem 3).'''
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("""Parameter n must be int or castable to int.""")
    if n <= 0:
        raise ValueError("""Parameter n must be greater than or equal to one.""")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
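# Illustrative check (added comment): solution(13195) returns 29,
# since 13195 = 5 * 7 * 13 * 29.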
if __name__ == "__main__":
print(F"{solution() = }")
| 354
|
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any]=1_3 , lowerCAmelCase_ : Optional[Any]=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Tuple=9_9 , lowerCAmelCase_ : List[str]=6_4 , lowerCAmelCase_ : Optional[int]=3_2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=3_7 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : int=5_1_2 , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : List[Any]=0.02 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Dict=None , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = embedding_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length])
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase_ = ids_tensor([self.batch_size] , self.num_choices)
lowercase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = MegatronBertModel(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = MegatronBertForMaskedLM(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
lowercase_ = MegatronBertForCausalLM(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = MegatronBertForNextSentencePrediction(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = MegatronBertForPreTraining(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , next_sentence_label=lowerCAmelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple):
"""simple docstring"""
lowercase_ = MegatronBertForQuestionAnswering(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = self.num_labels
lowercase_ = MegatronBertForSequenceClassification(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = self.num_labels
lowercase_ = MegatronBertForTokenClassification(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = self.num_choices
lowercase_ = MegatronBertForMultipleChoice(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowercase_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowercase_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowercase__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = True
# test_resize_embeddings = False
lowercase__ = False
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]=False):
"""simple docstring"""
lowercase_ = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_)
if return_labels:
if model_class in get_values(lowerCAmelCase_):
lowercase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase_)
lowercase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_)
return inputs_dict
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = MegatronBertModelTester(self)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*lowerCAmelCase_)
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowerCAmelCase_)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
return torch.tensor(
__lowerCAmelCase , dtype=torch.long , device=__lowerCAmelCase , )
UpperCAmelCase : Any = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
@unittest.skip("""Model is not available.""")
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
lowercase_ = os.path.join(os.environ["""MYDIR"""] , lowerCAmelCase_)
lowercase_ = MegatronBertModel.from_pretrained(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.half()
lowercase_ = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]])
with torch.no_grad():
lowercase_ = model(lowerCAmelCase_)[0]
lowercase_ = torch.Size((1, 9, 1_0_2_4))
self.assertEqual(output.shape , lowerCAmelCase_)
lowercase_ = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728]
for ii in range(3):
for jj in range(3):
lowercase_ = output[0, ii, jj]
lowercase_ = expected[3 * ii + jj]
lowercase_ = """ii={} jj={} a={} b={}""".format(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
self.assertTrue(math.isclose(lowerCAmelCase_ , lowerCAmelCase_ , rel_tol=lowerCAmelCase_ , abs_tol=lowerCAmelCase_) , msg=lowerCAmelCase_)
| 313
| 0
|
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=None ) -> Optional[int]:
'''simple docstring'''
lowercase_ = None
if token is not None:
lowercase_ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
lowercase_ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
lowercase_ = requests.get(snake_case_ , headers=snake_case_ ).json()
lowercase_ = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
lowercase_ = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(snake_case_ ):
lowercase_ = requests.get(url + F'''&page={i + 2}''' , headers=snake_case_ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=None ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = None
if token is not None:
lowercase_ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
lowercase_ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
lowercase_ = requests.get(snake_case_ , headers=snake_case_ ).json()
lowercase_ = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
lowercase_ = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(snake_case_ ):
lowercase_ = requests.get(url + F'''&page={i + 2}''' , headers=snake_case_ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = None
if token is not None:
lowercase_ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
lowercase_ = requests.get(snake_case_ , headers=snake_case_ , allow_redirects=snake_case_ )
lowercase_ = result.headers["""Location"""]
lowercase_ = requests.get(snake_case_ , allow_redirects=snake_case_ )
lowercase_ = os.path.join(snake_case_ , F'''{artifact_name}.zip''' )
with open(snake_case_ , """wb""" ) as fp:
fp.write(response.content )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=None ) -> Dict:
'''simple docstring'''
lowercase_ = []
lowercase_ = []
lowercase_ = None
with zipfile.ZipFile(snake_case_ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(snake_case_ ) as f:
for line in f:
lowercase_ = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowercase_ = line[: line.index(""": """ )]
lowercase_ = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
lowercase_ = line[len("""FAILED """ ) :]
failed_tests.append(snake_case_ )
elif filename == "job_name.txt":
lowercase_ = line
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
F'''`errors` and `failed_tests` should have the same number of elements. Got {len(snake_case_ )} for `errors` '''
F'''and {len(snake_case_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
lowercase_ = None
if job_name and job_links:
lowercase_ = job_links.get(snake_case_ , snake_case_ )
# A list with elements of the form (line of error, error, failed test)
lowercase_ = [x + [y] + [job_link] for x, y in zip(snake_case_ , snake_case_ )]
return result
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=None ) -> str:
'''simple docstring'''
lowercase_ = []
lowercase_ = [os.path.join(snake_case_ , snake_case_ ) for p in os.listdir(snake_case_ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(snake_case_ , job_links=snake_case_ ) )
return errors
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=None ) -> Optional[int]:
'''simple docstring'''
lowercase_ = Counter()
counter.update([x[1] for x in logs] )
lowercase_ = counter.most_common()
lowercase_ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowercase_ = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    lowercase_ = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True ) )
return r
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
lowercase_ = test.split("""/""" )[2]
else:
lowercase_ = None
return test
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=None ) -> Optional[int]:
'''simple docstring'''
lowercase_ = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowercase_ = [x for x in logs if x[2] is not None]
lowercase_ = {x[2] for x in logs}
lowercase_ = {}
for test in tests:
lowercase_ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowercase_ = counter.most_common()
lowercase_ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowercase_ = sum(error_counts.values() )
if n_errors > 0:
lowercase_ = {"""count""": n_errors, """errors""": error_counts}
    lowercase_ = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True ) )
return r
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = """| no. | error | status |"""
lowercase_ = """|-:|:-|:-|"""
lowercase_ = [header, sep]
for error in reduced_by_error:
lowercase_ = reduced_by_error[error]["""count"""]
lowercase_ = F'''| {count} | {error[:1_00]} | |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = """| model | no. of errors | major error | count |"""
lowercase_ = """|-:|-:|-:|-:|"""
lowercase_ = [header, sep]
for model in reduced_by_model:
lowercase_ = reduced_by_model[model]["""count"""]
lowercase_ = list(reduced_by_model[model]["""errors"""].items() )[0]
lowercase_ = F'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
UpperCAmelCase : Tuple = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
UpperCAmelCase : Any = get_job_links(args.workflow_run_id, token=args.token)
UpperCAmelCase : int = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
UpperCAmelCase : List[Any] = k.find(" / ")
UpperCAmelCase : Tuple = k[index + len(" / ") :]
UpperCAmelCase : Any = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
UpperCAmelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
UpperCAmelCase : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
UpperCAmelCase : Any = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
UpperCAmelCase : Dict = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
UpperCAmelCase : str = reduce_by_error(errors)
UpperCAmelCase : str = reduce_by_model(errors)
UpperCAmelCase : List[Any] = make_github_table(reduced_by_error)
UpperCAmelCase : Tuple = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 355
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    '''Doolittle LU decomposition (no pivoting) of a square matrix.'''
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            """'table' has to be a square shaped array but got a """
            F'''{rows}x{columns} array:\n{table}'''
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
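# --- Added illustrative check (not part of the original module) --------------------
# Decomposes a small assumed example matrix and confirms lower @ upper rebuilds it.
def _demo_lower_upper_decomposition() -> bool:
    example = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(example)
    return bool(np.allclose(lower @ upper, example))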
if __name__ == "__main__":
import doctest
doctest.testmod()
| 313
| 0
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
UpperCAmelCase : List[str] = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
| 356
|
"""simple docstring"""
import os
from collections.abc import Iterator
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = "." ) -> Iterator[str]:
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(__lowerCAmelCase ):
lowercase_ = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__lowerCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(__lowerCAmelCase , __lowerCAmelCase ).lstrip("""./""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return F'''{i * " "}*''' if i else "\n##"
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(__lowerCAmelCase ) or old_parts[i] != new_part) and new_part:
print(F'''{md_prefix(__lowerCAmelCase )} {new_part.replace("_" , " " ).title()}''' )
return new_path
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = "." ) -> None:
'''simple docstring'''
lowercase_ = """"""
for filepath in sorted(good_file_paths(__lowerCAmelCase ) ):
lowercase_ , lowercase_ = os.path.split(__lowerCAmelCase )
if filepath != old_path:
lowercase_ = print_path(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = (filepath.count(os.sep ) + 1) if filepath else 0
lowercase_ = F'''{filepath}/{filename}'''.replace(""" """ , """%20""" )
lowercase_ = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(F'''{md_prefix(__lowerCAmelCase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md(".")
| 313
| 0
|
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    '''Return the set of distinct prime factors of n.'''
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def upf_len(num: int) -> int:
    '''Memoised count of distinct prime factors.'''
    return len(unique_prime_factors(num))
def equality(iterable: list) -> bool:
    '''True if every element of the iterable is equal (or it is empty).'''
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    '''Find the first n consecutive integers that each have n distinct prime factors.'''
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4) -> int:
    '''Return the first of the n consecutive integers found by run(n).'''
    results = run(n)
    return results[0] if len(results) else None
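# Illustrative check (added comment): solution(2) == 14, since 14 = 2 * 7 and
# 15 = 3 * 5 are the first two consecutive integers with two distinct prime factors each.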
if __name__ == "__main__":
print(solution())
| 357
|
"""simple docstring"""
def bead_sort(sequence: list) -> list:
    '''Sort a list of non-negative integers using bead (gravity) sort.'''
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("""Sequence must be list of non-negative integers""")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
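# Added note: every outer pass lets larger values sink one position further, e.g.
# [7, 9, 4] -> [7, 4, 9] -> [4, 7, 9] over two passes.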
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 313
| 0
|
"""simple docstring"""
def xnor_gate(input_1: int, input_2: int) -> int:
    '''Return 1 if both inputs are equal (XNOR), else 0.'''
    return 1 if input_1 == input_2 else 0
def test_xnor_gate() -> None:
    '''Exhaustively check the XNOR truth table.'''
    assert xnor_gate(0 , 0 ) == 1
    assert xnor_gate(0 , 1 ) == 0
    assert xnor_gate(1 , 0 ) == 0
    assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 358
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
lowercase_ = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowercase_ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowercase_ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int=1_3 , lowerCAmelCase_ : Optional[Any]=7 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Dict=9_9 , lowerCAmelCase_ : List[Any]=1_6 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : str=3_2 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : str=0 , lowerCAmelCase_ : List[Any]=0.02 , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = eos_token_id
lowercase_ = pad_token_id
lowercase_ = bos_token_id
lowercase_ = initializer_range
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
lowercase_ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1)
lowercase_ = shift_tokens_right(lowerCAmelCase_ , 1 , 2)
lowercase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCAmelCase_ , )
lowercase_ = prepare_blenderbot_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
return config, inputs_dict
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""")
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = 2_0
lowercase_ = model_class_name(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""])
lowercase_ , lowercase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_)
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''')
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = 99
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
lowercase_ = input_ids.shape[0]
lowercase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ = self._get_config_and_data()
lowercase_ = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase_)
lowercase_ = lm_model(input_ids=lowerCAmelCase_)
lowercase_ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
lowercase_ = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase_)
lowercase_ = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa)
lowercase_ = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa)
lowercase_ = lm_model(input_ids=lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_)
lowercase_ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa)
lowercase_ = shift_tokens_right(lowerCAmelCase_ , 1 , 2)
lowercase_ = np.equal(lowerCAmelCase_ , 1).astype(np.floataa).sum()
lowercase_ = np.equal(lowerCAmelCase_ , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(lowerCAmelCase_ , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase , __UpperCAmelCase ):
lowercase__ = True
lowercase__ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowercase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = FlaxBlenderbotModelTester(self)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = model_class(lowerCAmelCase_)
@jax.jit
def encode_jitted(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]=None , **lowerCAmelCase_ : str):
return model.encode(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
with self.subTest("""JIT Enabled"""):
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""])
lowercase_ = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any]):
return model.decode(
decoder_input_ids=lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , encoder_outputs=lowerCAmelCase_ , )
with self.subTest("""JIT Enabled"""):
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase_ = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""")
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowercase_ = np.ones((1, 1)) * model.config.eos_token_id
lowercase_ = model(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""")
@slow
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 1_5, """max_length""": 2_5}
lowercase_ = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
lowercase_ = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=lowerCAmelCase_)
lowercase_ = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""")
lowercase_ = ["""Sam"""]
lowercase_ = tokenizer(lowerCAmelCase_ , return_tensors="""jax""")
lowercase_ = model.generate(**lowerCAmelCase_ , **lowerCAmelCase_)
lowercase_ = """Sam is a great name. It means \"sun\" in Gaelic."""
lowercase_ = tokenizer.batch_decode(lowerCAmelCase_ , **lowerCAmelCase_)
assert generated_txt[0].strip() == tgt_text
| 313
| 0
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {"vocab_file": "spiece.model"}
UpperCAmelCase : Tuple = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
UpperCAmelCase : str = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
UpperCAmelCase : List[str] = "▁"
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Any="[CLS]" , lowerCAmelCase_ : Optional[Any]="[SEP]" , lowerCAmelCase_ : str="<unk>" , lowerCAmelCase_ : List[str]="[SEP]" , lowerCAmelCase_ : str="<pad>" , lowerCAmelCase_ : List[Any]="[CLS]" , lowerCAmelCase_ : Dict="[MASK]" , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , **lowerCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
lowercase_ = (
AddedToken(a_ , lstrip=a_ , rstrip=a_ , normalized=a_)
if isinstance(a_ , a_)
else mask_token
)
lowercase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
lowercase_ = do_lower_case
lowercase_ = remove_space
lowercase_ = keep_accents
lowercase_ = vocab_file
lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(a_)
@property
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
return len(self.sp_model)
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = {self.convert_ids_to_tokens(a_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Any):
"""simple docstring"""
lowercase_ = self.__dict__.copy()
lowercase_ = None
return state
def __setstate__( self : Optional[Any] , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs"""):
lowercase_ = {}
lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
if self.remove_space:
lowercase_ = """ """.join(inputs.strip().split())
else:
lowercase_ = inputs
lowercase_ = outputs.replace("""``""" , """\"""").replace("""''""" , """\"""")
if not self.keep_accents:
lowercase_ = unicodedata.normalize("""NFKD""" , a_)
lowercase_ = """""".join([c for c in outputs if not unicodedata.combining(a_)])
if self.do_lower_case:
lowercase_ = outputs.lower()
return outputs
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = self.preprocess_text(a_)
lowercase_ = self.sp_model.encode(a_ , out_type=a_)
lowercase_ = []
for piece in pieces:
if len(a_) > 1 and piece[-1] == str(""",""") and piece[-2].isdigit():
lowercase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , """"""))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
lowercase_ = cur_pieces[1:]
else:
lowercase_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(a_)
else:
new_pieces.append(a_)
return new_pieces
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
return self.sp_model.PieceToId(a_)
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
return self.sp_model.IdToPiece(a_)
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = []
lowercase_ = """"""
lowercase_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a_) + token
lowercase_ = True
lowercase_ = []
else:
current_sub_tokens.append(a_)
lowercase_ = False
out_string += self.sp_model.decode(a_)
return out_string.strip()
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_)
if token_ids_a is not None:
return [1] + ([0] * len(a_)) + [1] + ([0] * len(a_)) + [1]
return [1] + ([0] * len(a_)) + [1]
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(a_):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase_ = os.path.join(
a_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(a_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , a_)
elif not os.path.isfile(self.vocab_file):
with open(a_ , """wb""") as fi:
lowercase_ = self.sp_model.serialized_model_proto()
fi.write(a_)
return (out_vocab_file,)
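# --- Added usage sketch (commented; the model path is a placeholder assumption) ----
# import sentencepiece as spm
# sp = spm.SentencePieceProcessor()
# sp.Load("spiece.model")                                # local SentencePiece model file
# pieces = sp.encode("machine learning", out_type=str)   # list of subword strings
# text = sp.decode(pieces)                               # typically round-trips the input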
| 359
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
UpperCAmelCase : Dict = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
UpperCAmelCase : Union[str, Any] = 10
UpperCAmelCase : Union[str, Any] = 256
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[MinHash]:
'''simple docstring'''
if len(__lowerCAmelCase ) < MIN_NUM_TOKENS:
return None
lowercase_ = MinHash(num_perm=__lowerCAmelCase )
for token in set(__lowerCAmelCase ):
min_hash.update(token.encode() )
return min_hash
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Set[str]:
'''simple docstring'''
return {t for t in NON_ALPHA.split(__lowerCAmelCase ) if len(t.strip() ) > 0}
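# --- Added illustrative sketch (made-up snippets; not part of the original module) ---
# Shows the idea used in this module: MinHash signatures approximate the Jaccard
# similarity between the token sets of two code snippets; 256 matches NUM_PERM above.
def _minhash_jaccard_demo() -> tuple:
    snippet_a = "def add(a, b): return a + b"
    snippet_b = "def add(x, y): return x + y"
    tokens_a = {t for t in re.split(r"[^A-Za-z_0-9]", snippet_a) if t}
    tokens_b = {t for t in re.split(r"[^A-Za-z_0-9]", snippet_b) if t}
    exact = len(tokens_a & tokens_b) / len(tokens_a | tokens_b)
    minhash_a, minhash_b = MinHash(num_perm=256), MinHash(num_perm=256)
    for token in tokens_a:
        minhash_a.update(token.encode())
    for token in tokens_b:
        minhash_b.update(token.encode())
    estimate = minhash_a.jaccard(minhash_b)
    return exact, estimate  # the MinHash estimate should be close to the exact value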
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        """simple docstring"""
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """simple docstring"""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(F'''Duplicate key {code_key}''')
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """simple docstring"""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        """simple docstring"""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    '''simple docstring'''
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    '''simple docstring'''
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    '''simple docstring'''
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    '''simple docstring'''
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    '''simple docstring'''
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    '''simple docstring'''
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    '''simple docstring'''
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(F'''Original dataset size: {len(dataset)}''')
    print(F'''Number of duplicate clusters: {len(duplicate_clusters)}''')
    print(F'''Files in duplicate cluster: {len(duplicate_indices)}''')
    print(F'''Unique files in duplicate cluster: {len(extreme_dict)}''')
    print(F'''Filtered dataset size: {len(ds_filter)}''')
    return ds_filter, duplicate_clusters
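

if __name__ == "__main__":
    # Tiny self-check of the token-level Jaccard similarity defined above
    # (NON_ALPHA splits on anything that is not a word character).
    print(jaccard_similarity("def add(a, b): return a + b", "def add(x, y): return x + y"))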
| 313
| 0
|
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data: bytes) -> None:
        """simple docstring"""
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A_09E_667,
0xBB_67A_E85,
0x3C_6EF_372,
0xA5_4FF_53A,
0x51_0E5_27F,
0x9B_056_88C,
0x1F_83D_9AB,
0x5B_E0C_D19,
]
        # Initialize round constants
        self.round_constants = [
0x42_8A2_F98,
0x71_374_491,
0xB5_C0F_BCF,
0xE9_B5D_BA5,
0x39_56C_25B,
0x59_F11_1F1,
0x92_3F8_2A4,
0xAB_1C5_ED5,
0xD8_07A_A98,
0x12_835_B01,
0x24_318_5BE,
0x55_0C7_DC3,
0x72_BE5_D74,
0x80_DEB_1FE,
0x9B_DC0_6A7,
0xC1_9BF_174,
0xE4_9B6_9C1,
0xEF_BE4_786,
0x0F_C19_DC6,
0x24_0CA_1CC,
0x2D_E92_C6F,
0x4A_748_4AA,
0x5C_B0A_9DC,
0x76_F98_8DA,
0x98_3E5_152,
0xA8_31C_66D,
0xB0_032_7C8,
0xBF_597_FC7,
0xC6_E00_BF3,
0xD5_A79_147,
0x06_CA6_351,
0x14_292_967,
0x27_B70_A85,
0x2E_1B2_138,
0x4D_2C6_DFC,
0x53_380_D13,
0x65_0A7_354,
0x76_6A0_ABB,
0x81_C2C_92E,
0x92_722_C85,
0xA2_BFE_8A1,
0xA8_1A6_64B,
0xC2_4B8_B70,
0xC7_6C5_1A3,
0xD1_92E_819,
0xD6_990_624,
0xF4_0E3_585,
0x10_6AA_070,
0x19_A4C_116,
0x1E_376_C08,
0x27_487_74C,
0x34_B0B_CB5,
0x39_1C0_CB3,
0x4E_D8A_A4A,
0x5B_9CC_A4F,
0x68_2E6_FF3,
0x74_8F8_2EE,
0x78_A56_36F,
0x84_C87_814,
0x8C_C70_208,
0x90_BEF_FFA,
0xA4_506_CEB,
0xBE_F9A_3F7,
0xC6_717_8F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """simple docstring"""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", len(data) * 8)
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        """simple docstring"""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    (d + temp1) % 0x100000000,
                    c,
                    b,
                    a,
                    (temp1 + temp2) % 0x100000000,
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        """simple docstring"""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self):
        """simple docstring"""
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    '''simple docstring'''
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
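    # Rotate-right sanity check for the helper above: the low bit wraps to bit 31.
    assert SHA256(b"").ror(1, 1) == 0x80000000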
| 360
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k) -> str:
    '''simple docstring'''
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
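

# Worked example of rename_state_dict_key (patterns apply in PATTERNS order):
#   "encoder/LayerNorm/gamma"
#     -> "encoder.LayerNorm.gamma"      ("/" -> ".")
#     -> "encoder_layer_norm.weight"    (".LayerNorm.gamma" -> "_layer_norm.weight")
#     -> "encoder.layer_norm.weight"    ("encoder_layer_norm." -> "encoder.layer_norm.")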
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    '''simple docstring'''
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''')
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], F'''no matches found for the following tf keys {extra}'''
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    '''simple docstring'''
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir) -> None:
    '''simple docstring'''
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[F'''summarization_{dataset}''']["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[F'''summarization_{dataset}''']
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 313
| 0
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def test_config(self):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
"""simple docstring"""
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        """simple docstring"""
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        """simple docstring"""
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        """simple docstring"""
        pass
    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        """simple docstring"""
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        """simple docstring"""
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        """simple docstring"""
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")
    def test_forward_signature(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
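

# Sketch of the stage-size arithmetic used in create_and_check_model above:
# each CvT stage is a strided conv embedding, so the spatial size follows
# floor((size + 2 * padding - kernel) / stride + 1).
def _conv_out(size, kernel, stride, padding):
    return floor((size + 2 * padding - kernel) / stride + 1)


assert _conv_out(64, 7, 4, 2) == 16  # stage 1 with the tester's defaults
assert _conv_out(16, 3, 2, 1) == 8   # stage 2
assert _conv_out(8, 3, 2, 1) == 4    # stage 3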
| 361
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    '''simple docstring'''
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1e10) -> int:
    '''simple docstring'''
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
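    # Sanity check for the incremental sieve above: the first eight primes.
    gen = sieve()
    assert [next(gen) for _ in range(8)] == [2, 3, 5, 7, 11, 13, 17, 19]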
| 313
| 0
|
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """simple docstring"""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), F'''model config doesn\'t have a `{p}` attribute'''
                setattr(self.config, p, getattr(self.hparams, p))
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        """simple docstring"""
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        """simple docstring"""
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps())
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """simple docstring"""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        """simple docstring"""
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        """simple docstring"""
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """simple docstring"""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        """simple docstring"""
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        """simple docstring"""
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        """simple docstring"""
        return self.train_loader

    def val_dataloader(self):
        """simple docstring"""
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        """simple docstring"""
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        """simple docstring"""
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        """simple docstring"""
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """simple docstring"""
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name")
        parser.add_argument(
            "--tokenizer_name", default=None, type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir", default=str(Path(root_dir).parent / "test_run" / "cache"), type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop", type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop", type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--attention_dropout", type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    # the exact hook name is an assumption; any hook that runs once on the master worker would do
    def on_sanity_check_start(self, trainer, pl_module):
        """simple docstring"""
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # prints any model parameter that did not receive a gradient; hook name is an assumption
    def on_after_backward(self, trainer, pl_module):
        """simple docstring"""
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        """simple docstring"""
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        """simple docstring"""
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        """simple docstring"""
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    '''simple docstring'''
    parser.add_argument(
        "--output_dir", default=str(Path(__file__).parent / "test_run" / "model_checkpoints"), type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16", action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(__file__).parent / "test_run" / "dummy-train-data"), type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    '''simple docstring'''
    pl.seed_everything(args.seed)
    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1)
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"
    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )
    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
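

if __name__ == "__main__":
    # Standalone sketch of the two-group weight-decay split used in
    # configure_optimizers above ("demo_" names are illustrative only).
    import torch

    demo_model = torch.nn.Linear(4, 2)
    no_decay = ["bias", "LayerNorm.weight"]
    grouped = [
        {"params": [p for n, p in demo_model.named_parameters() if not any(nd in n for nd in no_decay)],
         "weight_decay": 0.01},
        {"params": [p for n, p in demo_model.named_parameters() if any(nd in n for nd in no_decay)],
         "weight_decay": 0.0},
    ]
    print(torch.optim.AdamW(grouped, lr=5e-5))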
| 362
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        """simple docstring"""
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        """simple docstring"""
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty(self) -> bool:
        """simple docstring"""
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        """simple docstring"""
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        """simple docstring"""
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        """simple docstring"""
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation(self) -> None:
        """simple docstring"""
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        """simple docstring"""
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        """simple docstring"""
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
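    # Usage sketch of the queue above (names as restored in this file):
    q = CircularQueueLinkedList(2)
    q.enqueue("a")
    q.enqueue("b")
    assert q.first() == "a"
    assert q.dequeue() == "a"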
| 313
| 0
|
"""simple docstring"""
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        """simple docstring"""
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """simple docstring"""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """simple docstring"""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
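    # Usage sketch of the class above: prefix sums of [1, 2, 3] are [1, 3, 6].
    ps = PrefixSum([1, 2, 3])
    assert ps.get_sum(0, 2) == 6
    assert ps.contains_sum(6)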
| 363
|
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    '''simple docstring'''
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
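    # With an all-negative input, the flag decides whether the empty subarray
    # (sum 0) may be chosen.
    assert max_subarray_sum([-2, -1], allow_empty_subarrays=True) == 0
    assert max_subarray_sum([-2, -1]) == -1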
| 313
| 0
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    '''simple docstring'''
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    '''simple docstring'''
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
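    # Example invocation (script name and flags are hypothetical):
    #   python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5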
| 364
|
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        """simple docstring"""
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        """simple docstring"""
        super().__init__(config)
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        """simple docstring"""
        self.regression_threshold = threshold

    def set_patience(self, patience):
        """simple docstring"""
        self.patience = patience

    def reset_stats(self):
        """simple docstring"""
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        """simple docstring"""
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
            F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""")
elif input_ids is not None:
lowercase_ = input_ids.size()
elif inputs_embeds is not None:
lowercase_ = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""")
lowercase_ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
if token_type_ids is None:
lowercase_ = torch.zeros(lowerCAmelCase_ , dtype=torch.long , device=lowerCAmelCase_)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowercase_ = self.get_extended_attention_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowercase_ , lowercase_ , lowercase_ = encoder_hidden_states.size()
lowercase_ = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
lowercase_ = self.invert_attention_mask(lowerCAmelCase_)
else:
lowercase_ = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowercase_ = self.get_head_mask(lowerCAmelCase_ , self.config.num_hidden_layers)
lowercase_ = self.embeddings(
input_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_)
lowercase_ = embedding_output
if self.training:
lowercase_ = []
for i in range(self.config.num_hidden_layers):
lowercase_ = self.encoder.adaptive_forward(
lowerCAmelCase_ , current_layer=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_)
lowercase_ = self.pooler(lowerCAmelCase_)
lowercase_ = output_layers[i](output_dropout(lowerCAmelCase_))
res.append(lowerCAmelCase_)
elif self.patience == 0: # Use all layers for inference
lowercase_ = self.encoder(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
lowercase_ = self.pooler(encoder_outputs[0])
lowercase_ = [output_layers[self.config.num_hidden_layers - 1](lowerCAmelCase_)]
else:
lowercase_ = 0
lowercase_ = None
lowercase_ = 0
for i in range(self.config.num_hidden_layers):
calculated_layer_num += 1
lowercase_ = self.encoder.adaptive_forward(
lowerCAmelCase_ , current_layer=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_)
lowercase_ = self.pooler(lowerCAmelCase_)
lowercase_ = output_layers[i](lowerCAmelCase_)
if regression:
lowercase_ = logits.detach()
if patient_result is not None:
lowercase_ = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
patient_counter += 1
else:
lowercase_ = 0
else:
lowercase_ = logits.detach().argmax(dim=1)
if patient_result is not None:
lowercase_ = patient_result.detach().argmax(dim=1)
if (patient_result is not None) and torch.all(labels.eq(lowerCAmelCase_)):
patient_counter += 1
else:
lowercase_ = 0
lowercase_ = logits
if patient_counter == self.patience:
break
lowercase_ = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        """simple docstring"""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)])
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """simple docstring"""
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )
        outputs = (logits[-1],)
        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs
        return outputs
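

if __name__ == "__main__":
    # Toy sketch of the patience rule implemented above: inference stops once
    # `patience` consecutive layers agree on the argmax prediction.
    def _layers_until_exit(preds_per_layer, patience):
        counter, prev = 0, None
        for used, pred in enumerate(preds_per_layer, start=1):
            counter = counter + 1 if pred == prev else 0
            prev = pred
            if counter == patience:
                return used
        return len(preds_per_layer)

    assert _layers_until_exit([0, 1, 1, 1, 1], patience=2) == 4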
| 313
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    '''simple docstring'''
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[F'''{prefix}{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                orig_state_dict[F'''{prefix}{layer_num}.attention.attention.key.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[F'''{prefix}{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[F'''{prefix}{layer_num}.attention.attention.query.bias'''] = val[:dim]
                orig_state_dict[F'''{prefix}{layer_num}.attention.attention.key.bias'''] = val[dim : dim * 2]
                orig_state_dict[F'''{prefix}{layer_num}.attention.attention.value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    '''simple docstring'''
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])
    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print(F'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 365
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp) -> bool:
'''simple docstring'''
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x20000 and cp <= 0x2a6df) #
or (cp >= 0x2a700 and cp <= 0x2b73f) #
or (cp >= 0x2b740 and cp <= 0x2b81f) #
or (cp >= 0x2b820 and cp <= 0x2ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2f800 and cp <= 0x2fa1f) #
): #
return True
return False
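# Illustrative check of the codepoint ranges above (not part of the original script):
#   >>> _is_chinese_char(ord("中"))
#   True
#   >>> _is_chinese_char(ord("a"))
#   False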
def is_chinese(word) -> int:
    '''simple docstring'''
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1
def get_chinese_word(tokens) -> list:
    '''simple docstring'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set) -> list:
    '''simple docstring'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_len = min(end - start, max_word_len)
            for i in range(max_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
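# Example of the whole-word marking above (illustrative): with
# chinese_word_set = {"中国"}, the token list ["中", "国", "人"] becomes
# ["中", "##国", "人"], so "国" is flagged as the continuation of a whole word.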
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer) -> List[List[int]]:
    '''simple docstring'''
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save positions of Chinese subwords starting with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args) -> None:
    '''simple docstring'''
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
main(args)
| 313
| 0
|
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
'''simple docstring'''
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
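# Illustrative check (Project Euler 43): 1406357289 is the canonical pandigital
# number with this substring-divisibility property, so
#   is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9)) is True
# (357 % 7 == 0, 572 % 11 == 0, 728 % 13 == 0, 289 % 17 == 0).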
def solution(n: int = 10) -> int:
    '''simple docstring'''
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))
if __name__ == "__main__":
print(F"{solution() = }")
| 366
|
"""simple docstring"""
from __future__ import annotations
def p_series(nth_term: int, power: int) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(nth_term):
        series.append(f"1 / {pow(temp + 1, power)}" if series else "1")
    return series
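# Illustrative run (not part of the original script):
#   >>> p_series(5, 2)
#   ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']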
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 313
| 0
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    '''simple docstring'''
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id)
return config
def create_rename_keys(config):
'''simple docstring'''
lowercase_ = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.stages.{i}.downsample.reduction.weight''', F'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.weight''', F'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.bias''', F'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
# fmt: on
def correct_unfold_reduction_order(x):
    '''simple docstring'''
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def reverse_correct_unfold_reduction_order(x):
    '''simple docstring'''
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def correct_unfold_norm_order(x):
    '''simple docstring'''
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x
def reverse_correct_unfold_norm_order(x):
    '''simple docstring'''
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
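# Sanity-check sketch for the helpers above (illustrative, assumes the restored
# names): each reverse_* helper is the inverse of the matching correct_* helper,
# since [0, 2, 1, 3] is an involution and the reshape/transpose steps mirror
# each other. For example:
#   t = torch.arange(8.0)
#   assert torch.equal(correct_unfold_norm_order(reverse_correct_unfold_norm_order(t)), t)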
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    '''simple docstring'''
    model_name_to_url = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]
    for name, param in state_dict.items():
        print(name, param.shape)
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
# assert values
if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[F"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 367
|
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        """simple docstring"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()
    def test_str(self):
        """simple docstring"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")
    def test_size(self):
        """simple docstring"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)
    def test_euclidean_length(self):
        """simple docstring"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)
    def test_add(self):
        """simple docstring"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)
    def test_sub(self):
        """simple docstring"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)
    def test_mul(self):
        """simple docstring"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)
    def test_zero_vector(self):
        """simple docstring"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)
    def test_unit_basis_vector(self):
        """simple docstring"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")
    def test_axpy(self):
        """simple docstring"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")
    def test_copy(self):
        """simple docstring"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))
    def test_change_component(self):
        """simple docstring"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")
    def test_str_matrix(self):
        """simple docstring"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))
    def test_minor(self):
        """simple docstring"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))
    def test_cofactor(self):
        """simple docstring"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))
    def test_determinant(self):
        """simple docstring"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())
    def test_mul_matrix(self):
        """simple docstring"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))
    def test_change_component_matrix(self):
        """simple docstring"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))
    def test_component_matrix(self):
        """simple docstring"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)
    def test_add_matrix(self):
        """simple docstring"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))
    def test_sub_matrix(self):
        """simple docstring"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))
    def test_square_zero_matrix(self):
        """simple docstring"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5)))
if __name__ == "__main__":
unittest.main()
| 313
| 0
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """simple docstring"""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)
    def test_vocab_size(self):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
    def big_tokenizer(self):
"""simple docstring"""
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""")
    def test_picklable_without_disk(self):
        """simple docstring"""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        """simple docstring"""
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
lowercase_ = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase))
@slow
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = {
"""input_ids""": [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name="""facebook/xglm-564M""" , padding=_lowerCamelCase , )
| 368
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    '''simple docstring'''
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
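# Illustrative usage of the two helpers above (not part of the original script):
#   data = [3, 1, 2]
#   comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
#   # data is now [1, 2, 3]; `comparisons` holds the number of element comparisons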
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 313
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
'''simple docstring'''
lowercase_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    '''simple docstring'''
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    '''simple docstring'''
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase_ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
lowercase_ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
lowercase_ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
lowercase_ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
lowercase_ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __lowerCAmelCase , atol=1E-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 313
| 0
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    '''simple docstring'''
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    '''simple docstring'''
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
        img_paths.append(img_path)
        labels.append(boxes)
return img_paths, labels
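# Expected label-file layout (YOLO format), illustrative:
#   <class_id> <x_center> <y_center> <width> <height>   e.g. "0 0.25 0.5 0.1 0.2"
# get_dataset pairs each such .txt in label_dir with the same-stem .jpg in img_dir.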
def update_image_and_anno(img_list, anno_list, flip_type=1):
    '''simple docstring'''
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    '''simple docstring'''
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 370
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"
    def __init__(self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention="kv", self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], **kwargs):
"""simple docstring"""
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4
    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """simple docstring"""
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.")
| 313
| 0
|
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
lowercase_ = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , """os.path.join""" , _lowerCAmelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    '''simple docstring'''
    assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
'''simple docstring'''
lowercase_ = """__test_patch_submodule_missing_mock__"""
with patch_submodule(_test_patching , """pandas.read_csv""" , _lowerCAmelCase ):
pass
def test_patch_submodule_missing_builtin():
'''simple docstring'''
lowercase_ = """__test_patch_submodule_missing_builtin_mock__"""
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , """len""" , _lowerCAmelCase ) is None
with patch_submodule(_test_patching , """len""" , _lowerCAmelCase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
'''simple docstring'''
lowercase_ = """__test_patch_submodule_start_and_stop_mock__"""
lowercase_ = patch_submodule(_test_patching , """open""" , _lowerCAmelCase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
'''simple docstring'''
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
lowercase_ = """__test_patch_submodule_successive_join__"""
lowercase_ = """__test_patch_submodule_successive_dirname__"""
lowercase_ = """__test_patch_submodule_successive_rename__"""
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , """os.path.join""" , _lowerCAmelCase ):
with patch_submodule(_test_patching , """os.rename""" , _lowerCAmelCase ):
with patch_submodule(_test_patching , """os.path.dirname""" , _lowerCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , """os.rename""" , _lowerCAmelCase ):
with patch_submodule(_test_patching , """os.path.join""" , _lowerCAmelCase ):
with patch_submodule(_test_patching , """os.path.dirname""" , _lowerCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
'''simple docstring'''
lowercase_ = """__test_patch_submodule_doesnt_exist_mock__"""
with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , _lowerCAmelCase ):
pass
with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , _lowerCAmelCase ):
pass
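

# A minimal usage sketch (an illustrative assumption, not part of the test suite above):
# patch_submodule swaps an attribute reachable from a module's globals, no matter how it
# was imported, and restores the original when the context manager exits.
def demo_patch_submodule():
    fake_join = lambda *parts: "<joined>"
    with patch_submodule(_test_patching, "os.path.join", fake_join):
        assert _test_patching.os.path.join is fake_join  # patched inside the block
    assert _test_patching.os.path.join is not fake_join  # original restored afterwards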
| 371
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        """simple docstring"""
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        """simple docstring"""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)
    def test_vocab_size(self):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2)
@require_torch
    def test_prepare_batch(self):
        """simple docstring"""
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# moussaKam/mbarthez is a french model. So we also use french texts.
lowercase_ = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=lowerCAmelCase_ , )
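

# Illustrative sketch (an assumption, requires downloading the checkpoint): like
# CamemBERT/RoBERTa tokenizers, Barthez wraps a single sequence as `<s> ... </s>`,
# so the first and last ids of an encoded sentence are the special tokens.
def demo_barthez_special_tokens():
    tok = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
    ids = tok("Bonjour le monde")["input_ids"]
    assert ids[0] == tok.bos_token_id and ids[-1] == tok.eos_token_id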
| 313
| 0
|
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCAmelCase : int = logging.getLogger(__name__)
UpperCAmelCase : List[Any] = {"facebook/bart-base": BartForConditionalGeneration}
UpperCAmelCase : List[str] = {"facebook/bart-base": BartTokenizer}
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument("--device", type=str, default="cpu", help="Device where the model will be run")
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    '''simple docstring'''
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    '''simple docstring'''
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)
        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )
        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )
        logger.info("Model exported to {}".format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    '''simple docstring'''
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
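

# Example invocation (an illustrative assumption; the script name and paths are
# hypothetical and should be adapted to your checkout):
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 \
#       --max_length 5 \
#       --output_file_path bart_beam_search.onnx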
| 350
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Optional[Any] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True, use_labels=False,
        vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
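

# A tiny self-contained illustration (an assumption, not part of the test suite) of the
# masking rule above: positions equal to pad_token_id are masked out (0), everything
# else is attended to (1).
def demo_padding_mask():
    pad_token_id = 1
    input_ids = np.array([[5, 7, pad_token_id]])
    attention_mask = np.not_equal(input_ids, pad_token_id).astype(np.int8)
    assert attention_mask.tolist() == [[1, 1, 0]]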
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        """simple docstring"""
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
assert tgt_text == decoded
| 313
| 0
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        """simple docstring"""
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        """simple docstring"""
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
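

# A quick illustration (an assumption, not part of the original module) of the split
# above: the YAML front matter between the two `---` fences is returned separately
# from the markdown body.
def _demo_split_yaml():
    yaml_block, body = _split_yaml_from_readme("---\nlicense: mit\n---\n# Title\nBody")
    assert yaml_block == "license: mit"
    assert body == "# Title\nBody"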
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """simple docstring"""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        """simple docstring"""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        """simple docstring"""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """simple docstring"""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """simple docstring"""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
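

# Round-trip sketch (an assumption, not part of the original module): the dashed YAML
# key `train-eval-index` is normalized to the underscore field name on load and
# converted back on dump.
def _demo_metadata_roundtrip():
    metadata = DatasetMetadata.from_yaml_string("license: mit\ntrain-eval-index: []\n")
    assert "train_eval_index" in metadata
    assert "train-eval-index:" in metadata.to_yaml_string()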
UpperCAmelCase : Tuple = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
UpperCAmelCase : Tuple = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
UpperCAmelCase : str = ap.parse_args()
UpperCAmelCase : Optional[int] = Path(args.readme_filepath)
UpperCAmelCase : Union[str, Any] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 351
|
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = None
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = True
lowercase__ = None
lowercase__ = 1
lowercase__ = None
lowercase__ = False
lowercase__ = None
lowercase__ = None
    def copy(self):
        """simple docstring"""
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
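

# Behaviour sketch (an assumption, as comments only because the field names above are
# elided): since `copy` deep-copies every attribute, mutating a container on the clone
# cannot leak back into the original instance, e.g.:
#
#     clone = config.copy()          # `config` is a hypothetical instance of the class above
#     assert clone is not config
#     assert clone.__dict__ == config.__dict__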
| 313
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4,
        decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02,
        decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, max_target_positions=1024, **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
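

# A minimal instantiation sketch (an assumption, not part of the original module):
# defaults mirror the signature above, and `attribute_map` exposes `hidden_size` as an
# alias of `d_model`.
def _demo_config_alias():
    config = Speech2Text2Config(d_model=128, decoder_layers=2)
    assert config.hidden_size == 128  # resolved through attribute_map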
| 352
|
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace",
        bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
        unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
        add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors,
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token,
            unk_token=unk_token, pad_token=pad_token, mask_token=mask_token,
            add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        """simple docstring"""
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """simple docstring"""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
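

# Illustration (an assumption, not part of the original module) of the padding rule
# above: `global_attention_mask` is padded with -1, because 0 already means "local
# attention" for LED and therefore cannot double as a padding marker.
def _demo_global_attention_padding():
    mask, target_len = [1, 0, 0], 5
    padded = mask + [-1] * (target_len - len(mask))
    assert padded == [1, 0, 0, -1, -1]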
| 313
| 0
|
"""simple docstring"""
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """simple docstring"""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        """simple docstring"""
        return str(self.k)
    def detect(self, img_path: str):
        """simple docstring"""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
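
    # Numerical sketch (an assumption, added for illustration): the Harris response is
    # r = det(M) - k * trace(M)**2 for the windowed structure tensor
    # M = [[wxx, wxy], [wxy, wyy]]; strong gradients in both directions give a large
    # positive response.
    wxx, wyy, wxy, k = 100.0, 90.0, 5.0, 0.04
    response = (wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2
    print("corner response:", response)  # 7531.0 -> corner-like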
| 353
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
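
# The pattern above in miniature (an illustrative assumption): `_LazyModule` defers the
# heavy import until an attribute is first accessed, so importing the package stays
# cheap and the torch-only classes fail only when actually used without torch, e.g.:
#
#     from transformers.models.upernet import UperNetConfig                   # no torch needed
#     from transformers.models.upernet import UperNetForSemanticSegmentation  # triggers the lazy import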
| 313
| 0
|
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 354
|
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64,
        embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    # Build a long tensor on the shared test device.
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 313
| 0
|
"""simple docstring"""
UpperCAmelCase : Optional[Any] = 'Tobias Carryer'
from time import time
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple=int(time())): # noqa: B008
"""simple docstring"""
lowercase_ = multiplier
lowercase_ = increment
lowercase_ = modulo
lowercase_ = seed
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
UpperCAmelCase : str = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
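
# Illustrative reproducibility sketch (not in the original file): two
# generators built with the same constants and seed yield the same stream.
# Shown as a comment because the demo loop above never terminates.
#
#   gen_a = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
#   gen_b = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
#   assert [gen_a.next_number() for _ in range(5)] == [gen_b.next_number() for _ in range(5)]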
| 355
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition of a square matrix into lower and upper triangular factors."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
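
# Quick sanity check for the decomposition above (illustrative, not part of
# the original module): L @ U should reproduce the input matrix.
if __name__ == "__main__":
    example = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(example)
    assert np.allclose(lower @ upper, example)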
| 313
| 0
|
def harmonic_series(n_term: str) -> list:
    """Generate the first n terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
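
# Expected behaviour for small inputs (illustrative, not in the original file):
if __name__ == "__main__":
    assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]
    assert harmonic_series("") == []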
| 356
|
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md(".")
| 313
| 0
|
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
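
# Illustrative use of the `decode(..., truncate_before_pattern=...)` override
# above (assumption: the checkpoint can be downloaded; the completion text and
# pattern are made up for the demo).
if __name__ == "__main__":
    tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
    completion = "def add(a, b):\n    return a + b\n\n\nprint(add(1, 2))\n"
    ids = tokenizer(completion).input_ids
    # Everything from the first "\n\n\n" onwards is cut from the decoded text.
    print(tokenizer.decode(ids, truncate_before_pattern=["\n\n\n"]))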
| 357
|
"""simple docstring"""
def bead_sort(sequence: list) -> list:
    """Bead sort ("gravity sort"); only valid for sequences of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
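
# Illustrative cross-check against the built-in sort on random non-negative
# integers (not part of the original file):
if __name__ == "__main__":
    import random

    data = [random.randint(0, 100) for _ in range(20)]
    assert bead_sort(list(data)) == sorted(data)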
| 313
| 0
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
'''simple docstring'''
if (ksize % 2) == 0:
lowercase_ = ksize + 1
lowercase_ = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(A__ ):
for x in range(A__ ):
# distance from center
lowercase_ = x - ksize // 2
lowercase_ = y - ksize // 2
# degree to radiant
lowercase_ = theta / 1_80 * np.pi
lowercase_ = np.cos(_theta )
lowercase_ = np.sin(_theta )
# get kernel x
lowercase_ = cos_theta * px + sin_theta * py
# get kernel y
lowercase_ = -sin_theta * px + cos_theta * py
# fill kernel
lowercase_ = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
UpperCAmelCase : Optional[Any] = imread("../image_data/lena.jpg")
# turn image in gray scale value
UpperCAmelCase : int = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
UpperCAmelCase : Dict = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
UpperCAmelCase : Optional[int] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
UpperCAmelCase : Dict = out / out.max() * 255
UpperCAmelCase : Union[str, Any] = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
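
# Inspecting a single kernel from gabor_filter_kernel above (illustrative;
# the parameter values are arbitrary and this runs only after the demo window
# above is closed).
if __name__ == "__main__":
    kernel = gabor_filter_kernel(9, 8, 0, 10, 0, 0)
    print(kernel.shape)  # (9, 9)
    print(kernel[4, 4] == kernel.max())  # True: exp(0) * cos(0) == 1.0 at the center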
| 358
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
lowercase_ = input_ids.shape[0]
lowercase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ = self._get_config_and_data()
lowercase_ = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase_)
lowercase_ = lm_model(input_ids=lowerCAmelCase_)
lowercase_ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
lowercase_ = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase_)
lowercase_ = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa)
lowercase_ = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa)
lowercase_ = lm_model(input_ids=lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_)
lowercase_ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa)
lowercase_ = shift_tokens_right(lowerCAmelCase_ , 1 , 2)
lowercase_ = np.equal(lowerCAmelCase_ , 1).astype(np.floataa).sum()
lowercase_ = np.equal(lowerCAmelCase_ , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(lowerCAmelCase_ , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = model_class(lowerCAmelCase_)
@jax.jit
def encode_jitted(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]=None , **lowerCAmelCase_ : str):
return model.encode(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
with self.subTest("""JIT Enabled"""):
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""])
lowercase_ = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any]):
return model.decode(
decoder_input_ids=lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , encoder_outputs=lowerCAmelCase_ , )
with self.subTest("""JIT Enabled"""):
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase_ = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""")
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowercase_ = np.ones((1, 1)) * model.config.eos_token_id
lowercase_ = model(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""")
@slow
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 1_5, """max_length""": 2_5}
lowercase_ = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
lowercase_ = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=lowerCAmelCase_)
lowercase_ = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""")
lowercase_ = ["""Sam"""]
lowercase_ = tokenizer(lowerCAmelCase_ , return_tensors="""jax""")
lowercase_ = model.generate(**lowerCAmelCase_ , **lowerCAmelCase_)
lowercase_ = """Sam is a great name. It means \"sun\" in Gaelic."""
lowercase_ = tokenizer.batch_decode(lowerCAmelCase_ , **lowerCAmelCase_)
assert generated_txt[0].strip() == tgt_text
| 313
| 0
|
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text between two prompts in a file, returning it with the line indices."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """Return the markdown list of models supporting a given task guide."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally overwrite) the auto-generated model list inside a task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 359
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet's tokens."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the MinHashLSH index and record close duplicates in clusters."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset using MinHashLSH."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the token-level Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
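
# Example (token-level Jaccard similarity of two tiny snippets, illustrative):
#
#     >>> jaccard_similarity("def add(a, b): return a + b", "def add(x, y): return x + y")
#     0.42857142857142855  # 3 shared tokens (def, add, return) out of 7 total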
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster to "extremes" so every element is similar to at least one kept element."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Run _find_cluster_extremes_shared over all clusters in a pool; the dataset is shared via a global."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
| 313
| 0
|
"""simple docstring"""
from jiwer import compute_measures
import datasets
_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_DESCRIPTION = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
_KWARGS_DESCRIPTION = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n    >>> references = [\"this is the reference\", \"there is another one\"]\n    >>> wer = datasets.load_metric(\"wer\")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
| 360
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 313
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class SCREAMING_SNAKE_CASE__ ( __lowercase ):
lowercase__ = '''bridgetower'''
def __init__( self : Tuple , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Tuple="gelu" , lowerCAmelCase_ : Union[str, Any]=7_6_8 , lowerCAmelCase_ : Union[str, Any]=1 , lowerCAmelCase_ : Dict=1E-05 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Optional[int]="add" , lowerCAmelCase_ : Optional[int]=1_2 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : int=None , **lowerCAmelCase_ : int , ):
"""simple docstring"""
lowercase_ = kwargs.pop("""text_config_dict""" , UpperCAmelCase__)
lowercase_ = kwargs.pop("""vision_config_dict""" , UpperCAmelCase__)
super().__init__(**UpperCAmelCase__)
lowercase_ = share_cross_modal_transformer_layers
lowercase_ = hidden_act
lowercase_ = hidden_size
lowercase_ = initializer_factor
lowercase_ = layer_norm_eps
lowercase_ = share_link_tower_layers
lowercase_ = link_tower_type
lowercase_ = num_attention_heads
lowercase_ = num_hidden_layers
lowercase_ = tie_word_embeddings
lowercase_ = init_layernorm_from_vision_encoder
if text_config is None:
lowercase_ = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""")
if vision_config is None:
lowercase_ = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""")
lowercase_ = BridgeTowerTextConfig(**UpperCAmelCase__)
lowercase_ = BridgeTowerVisionConfig(**UpperCAmelCase__)
    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Instantiate a BridgeTowerConfig from separate text and vision configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this config, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
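# Illustrative usage sketch (not part of the original file; names mirror the
# classes above, and the hidden size is just an example value):
#
#     text_cfg = BridgeTowerTextConfig(hidden_size=768)
#     vision_cfg = BridgeTowerVisionConfig()
#     config = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
#     assert config.to_dict()["text_config"]["hidden_size"] == 768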
| 361
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Yield the primes 2, 3, 5, ... using an incremental sieve of factors."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: reschedule its stored factor at the next
            # multiple that is not already claimed.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # No stored factor, so `prime` really is prime; mark its square.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the least odd n for which the remainder 2 * n * p_n exceeds limit."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
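    # Sanity check (illustrative, not part of the original file): the
    # incremental sieve should start 2, 3, 5, 7, 11.
    from itertools import islice

    assert list(islice(sieve(), 5)) == [2, 3, 5, 7, 11]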
| 313
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : Dict="</s>" , lowerCAmelCase_ : Optional[Any]="<s>" , lowerCAmelCase_ : int="<unk>" , lowerCAmelCase_ : List[Any]="<pad>" , lowerCAmelCase_ : Any="<mask>" , lowerCAmelCase_ : Any = None , **lowerCAmelCase_ : Dict , ):
"""simple docstring"""
lowercase_ = AddedToken(_a , lstrip=_a , rstrip=_a) if isinstance(_a , _a) else mask_token
lowercase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
lowercase_ = vocab_file
lowercase_ = monolingual_vocab_file
lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(_a))
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowercase_ = {}
lowercase_ = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_a) not in self.fairseq_tokens_to_ids:
lowercase_ = cnt
cnt += 1
with open(_a , """r""" , encoding="""utf-8""") as f:
for line in f.readlines():
lowercase_ = line.strip().split()[0]
lowercase_ = len(self.fairseq_tokens_to_ids)
if str(_a) not in self.fairseq_tokens_to_ids:
lowercase_ = len(self.fairseq_tokens_to_ids)
lowercase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Dict):
"""simple docstring"""
lowercase_ = self.__dict__.copy()
lowercase_ = None
lowercase_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : int , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs"""):
lowercase_ = {}
lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] = None):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase_ = [self.cls_token_id]
lowercase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Tuple = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a)
if token_ids_a is None:
return [1] + ([0] * len(_a)) + [1]
return [1] + ([0] * len(_a)) + [1, 1] + ([0] * len(_a)) + [1]
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict = None):
"""simple docstring"""
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the reduced vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the reduced vocab."""
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens (strings) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int = None):
"""simple docstring"""
if not os.path.isdir(_a):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase_ = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
lowercase_ = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file) != os.path.abspath(_a) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _a)
elif not os.path.isfile(self.vocab_file):
with open(_a , """wb""") as fi:
lowercase_ = self.sp_model.serialized_model_proto()
fi.write(_a)
if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
_a) and os.path.isfile(self.monolingual_vocab_file):
copyfile(self.monolingual_vocab_file , _a)
elif not os.path.isfile(self.monolingual_vocab_file):
with open(_a , """w""" , encoding="""utf-8""") as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'''{str(_a)} \n''')
return out_vocab_file, out_monolingual_vocab_file
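# Illustrative usage sketch (assumes the public "vinai/bartpho-syllable"
# checkpoint referenced above; not part of the original file):
#
#     tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
#     text = tokenizer.decode(ids, skip_special_tokens=True)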
| 362
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """A fixed-capacity circular queue backed by a doubly linked list."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring in both directions
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
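    # Minimal usage sketch (illustrative, not part of the original file):
    # a fixed-capacity circular queue of size 3.
    queue = CircularQueueLinkedList(3)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.first() == "a"
    assert queue.dequeue() == "a"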
| 313
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
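# Illustrative note (not part of the original file): with the lazy layout
# above, the heavy submodules are only imported on first attribute access,
# e.g.
#
#     from transformers import PegasusXForConditionalGeneration  # triggers it
#
# while a bare `import transformers` stays cheap because `_LazyModule`
# defers loading `modeling_pegasus_x` until it is actually needed.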
| 363
|
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: return the largest sum of any contiguous subarray."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # either extend the running subarray or restart it at `num`
        # (or at the empty subarray when that is allowed)
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
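    # Worked example (illustrative): the best subarray of `nums` is
    # [4, -1, 2, 1] with sum 6; with empty subarrays allowed, an
    # all-negative input yields 0 instead of its largest element.
    assert max_subarray_sum(nums) == 6
    assert max_subarray_sum([-5, -2, -9], allow_empty_subarrays=True) == 0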
| 313
| 0
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(self, images=None, input_points=None, input_labels=None, input_boxes=None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        # pop arguments that are not used in the forward pass but are used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes,
        )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor, original_sizes, input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, return_tensors=return_tensors,
        )
        return encoding_image_processor
    def _normalize_and_convert(self, encoding_image_processor, original_sizes, input_points=None, input_labels=None, input_boxes=None, return_tensors="pt"):
        """Rescale prompt coordinates to the resized image and convert them to tensors."""
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                # a single original size is broadcast to every point prompt
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)
            input_points = np.array(input_points)
        if input_labels is not None:
            input_labels = np.array(input_labels)
        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})
        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        """Pad every point prompt to the largest point count in the batch."""
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        """Rescale coordinates given in the original (H, W) frame to the resized image."""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)
        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1, 4)
        return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        """Validate the prompt inputs and convert them to lists of numpy arrays."""
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))
    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
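# Illustrative usage sketch (the checkpoint name is an example, not part of
# the original file): prompting SAM preprocessing with one foreground point.
#
#     from PIL import Image
#     processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#     image = Image.new("RGB", (640, 480))
#     inputs = processor(image, input_points=[[[320, 240]]], return_tensors="pt")
#     # `inputs` now holds pixel_values plus rescaled "input_points".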
| 364
|
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
UpperCAmelCase : Optional[int] = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        """Run a single encoder layer, enabling layer-by-layer early-exit inference."""
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        """Set the agreement threshold used for early exit on regression tasks."""
        self.regression_threshold = threshold

    def set_patience(self, patience):
        """Set how many consecutive agreeing layers trigger an early exit."""
        self.patience = patience

    def reset_stats(self):
        """Reset the counters used to report the average exit layer."""
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        """Print the average number of layers used at inference time."""
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
@add_start_docstrings_to_model_forward(lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""")
elif input_ids is not None:
lowercase_ = input_ids.size()
elif inputs_embeds is not None:
lowercase_ = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""")
lowercase_ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
if token_type_ids is None:
lowercase_ = torch.zeros(lowerCAmelCase_ , dtype=torch.long , device=lowerCAmelCase_)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowercase_ = self.get_extended_attention_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowercase_ , lowercase_ , lowercase_ = encoder_hidden_states.size()
lowercase_ = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
lowercase_ = self.invert_attention_mask(lowerCAmelCase_)
else:
lowercase_ = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowercase_ = self.get_head_mask(lowerCAmelCase_ , self.config.num_hidden_layers)
lowercase_ = self.embeddings(
input_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_)
lowercase_ = embedding_output
if self.training:
lowercase_ = []
for i in range(self.config.num_hidden_layers):
lowercase_ = self.encoder.adaptive_forward(
lowerCAmelCase_ , current_layer=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_)
lowercase_ = self.pooler(lowerCAmelCase_)
lowercase_ = output_layers[i](output_dropout(lowerCAmelCase_))
res.append(lowerCAmelCase_)
elif self.patience == 0: # Use all layers for inference
lowercase_ = self.encoder(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
lowercase_ = self.pooler(encoder_outputs[0])
lowercase_ = [output_layers[self.config.num_hidden_layers - 1](lowerCAmelCase_)]
else:
lowercase_ = 0
lowercase_ = None
lowercase_ = 0
for i in range(self.config.num_hidden_layers):
calculated_layer_num += 1
lowercase_ = self.encoder.adaptive_forward(
lowerCAmelCase_ , current_layer=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_)
lowercase_ = self.pooler(lowerCAmelCase_)
lowercase_ = output_layers[i](lowerCAmelCase_)
if regression:
lowercase_ = logits.detach()
if patient_result is not None:
lowercase_ = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
patient_counter += 1
else:
lowercase_ = 0
else:
lowercase_ = logits.detach().argmax(dim=1)
if patient_result is not None:
lowercase_ = patient_result.detach().argmax(dim=1)
if (patient_result is not None) and torch.all(labels.eq(lowerCAmelCase_)):
patient_counter += 1
else:
lowercase_ = 0
lowercase_ = logits
if patient_counter == self.patience:
break
lowercase_ = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )
        self.init_weights()
@add_start_docstrings_to_model_forward(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : List[str]=None , ):
"""simple docstring"""
lowercase_ = self.bert(
input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowercase_ = (logits[-1],)
if labels is not None:
lowercase_ = None
lowercase_ = 0
for ix, logits_item in enumerate(lowerCAmelCase_):
if self.num_labels == 1:
# We are doing regression
lowercase_ = MSELoss()
lowercase_ = loss_fct(logits_item.view(-1) , labels.view(-1))
else:
lowercase_ = CrossEntropyLoss()
lowercase_ = loss_fct(logits_item.view(-1 , self.num_labels) , labels.view(-1))
if total_loss is None:
lowercase_ = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowercase_ = (total_loss / total_weights,) + outputs
return outputs
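# Illustrative usage sketch (the checkpoint name is an example, not part of
# the original file): early-exit inference with a patience of 3.
#
#     model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased")
#     model.bert.set_patience(3)    # exit once 3 consecutive layers agree
#     model.bert.reset_stats()
#     logits = model(input_ids=batch_input_ids)[0]
#     model.bert.log_stats()        # reports the average exit layer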
| 313
| 0
|
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the word's letters sorted, as a canonical anagram key."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word sharing the given word's signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
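    # Worked example (illustrative): signature("pots") == signature("stop")
    # == "opst", so both words land in the same bucket and each is reported
    # as an anagram of the other.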
| 365
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp) -> bool:
    """Check whether `cp` is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False
def is_chinese(word) -> int:
    """Return 1 if every character in `word` is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens) -> list:
    """Collect multi-character Chinese words from a token list."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set) -> list:
    """Prefix "##" onto BERT sub-tokens that continue a whole Chinese word."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            window = min(end - start, max_word_len)
            for i in range(window, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
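# Worked example (illustrative): with chinese_word_set = {"北京"}, the token
# list ["北", "京", "欢", "迎"] becomes ["北", "##京", "欢", "迎"], so only
# the continuation character is marked as a sub-token.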
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer) -> list:
    """Build whole-word-masking reference positions for each input line."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese sub-tokens starting with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save the Chinese tokens' positions
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """Tokenize the input file and write the whole-word-masking refs as JSON lines."""
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiters like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
UpperCAmelCase : int = parser.parse_args()
main(args)
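    # Illustrative invocation (paths are examples, not part of the original file):
    #
    #     python prepare_chinese_ref.py \
    #         --file_name ./resources/chinese-demo.txt \
    #         --ltp ./resources/ltp \
    #         --bert bert-base-chinese \
    #         --save_path ./resources/ref.txt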
| 313
| 0
|
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
return TaConfig.from_pretrained("""google/umt5-base""")
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : int=None , ):
"""simple docstring"""
if attention_mask is None:
lowercase_ = input_ids.ne(config.pad_token_id)
if decoder_attention_mask is None:
lowercase_ = decoder_input_ids.ne(config.pad_token_id)
if head_mask is None:
lowercase_ = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_lowercase)
if decoder_head_mask is None:
lowercase_ = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_lowercase)
if cross_attn_head_mask is None:
lowercase_ = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=_lowercase)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size)
lowercase_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size)
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowercase_ = input_ids.clamp(self.pad_token_id + 1)
lowercase_ = decoder_input_ids.clamp(self.pad_token_id + 1)
lowercase_ = self.get_config()
lowercase_ = config.num_attention_heads
lowercase_ = self.prepare_inputs_dict(_lowercase , _lowercase , _lowercase)
return config, input_dict
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ , lowercase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def _UpperCAmelCase ( self : int):
"""simple docstring"""
return TaConfig(
vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , ):
"""simple docstring"""
lowercase_ = UMTaModel(config=_lowercase)
model.to(_lowercase)
model.eval()
lowercase_ = model(
input_ids=_lowercase , decoder_input_ids=_lowercase , attention_mask=_lowercase , decoder_attention_mask=_lowercase , )
lowercase_ = model(input_ids=_lowercase , decoder_input_ids=_lowercase)
lowercase_ = result.last_hidden_state
lowercase_ = result.past_key_values
lowercase_ = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size))
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size))
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_lowercase) , config.num_layers)
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0]) , 4)
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , ):
"""simple docstring"""
lowercase_ = UMTaModel(config=_lowercase).get_decoder().to(_lowercase).eval()
# first forward pass
lowercase_ = model(_lowercase , use_cache=_lowercase)
lowercase_ = model(_lowercase)
lowercase_ = model(_lowercase , use_cache=_lowercase)
self.parent.assertTrue(len(_lowercase) == len(_lowercase))
self.parent.assertTrue(len(_lowercase) == len(_lowercase) + 1)
lowercase_ , lowercase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowercase_ = ids_tensor((self.batch_size, 1) , config.vocab_size)
# append to next input_ids and
lowercase_ = torch.cat([input_ids, next_tokens] , dim=-1)
lowercase_ = model(_lowercase)["""last_hidden_state"""]
lowercase_ = model(_lowercase , past_key_values=_lowercase)["""last_hidden_state"""]
# select random slice
lowercase_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
lowercase_ = output_from_no_past[:, -1, random_slice_idx].detach()
lowercase_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-3))
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , ):
"""simple docstring"""
lowercase_ = UMTaModel(config=_lowercase).to(_lowercase).half().eval()
lowercase_ = model(**_lowercase)["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(_lowercase).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMTaModelTester(self)
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""")
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
lowercase_ = UMTaModel(config_and_inputs[0]).to(_lowercase)
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_lowercase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=_lowercase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""")
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_lowercase)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
lowercase_ = self.model_tester.prepare_config_and_inputs()
lowercase_ = config_and_inputs[0]
lowercase_ = UMTaForConditionalGeneration(_lowercase).eval()
model.to(_lowercase)
lowercase_ = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=_lowercase),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_lowercase),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_lowercase),
}
for attn_name, (name, mask) in zip(_lowercase , head_masking.items()):
lowercase_ = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowercase_ = torch.ones(
config.num_decoder_layers , config.num_heads , device=_lowercase)
lowercase_ = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=_lowercase , return_dict_in_generate=_lowercase , **_lowercase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowercase_ = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights]) , 0.0)
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""")
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTests(unittest.TestCase):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""")
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=_lowercase).to(_lowercase)
lowercase_ = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=_lowercase , legacy=_lowercase)
lowercase_ = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
lowercase_ = tokenizer(_lowercase , return_tensors="""pt""" , padding=_lowercase).input_ids
# fmt: off
lowercase_ = torch.tensor(
[
[ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1],
])
# fmt: on
torch.testing.assert_allclose(_lowercase , _lowercase)
lowercase_ = model.generate(input_ids.to(_lowercase))
lowercase_ = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
lowercase_ = tokenizer.batch_decode(_lowercase)
self.assertEqual(_lowercase , _lowercase)
| 366
|
"""simple docstring"""
from __future__ import annotations
def p_series(nth_term: int | str, power: int | str) -> list[str]:
    """Return the P-Series 1 + 1/2^p + 1/3^p + ... + 1/n^p as strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
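    # Worked example (illustrative): p_series(5, 2) returns
    # ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"].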
| 313
| 0
|
"""simple docstring"""
def join(separator: str, separated: list[str]) -> str:
    """Join the given strings with `separator`, rejecting non-string items."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
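    # Worked example (illustrative): join("-", ["a", "b", "c"]) == "a-b-c",
    # and join("", ["ab", "cd"]) == "abcd".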
| 367
|
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
self.assertEqual(x.component(0) , 1)
self.assertEqual(x.component(2) , 3)
lowercase_ = Vector()
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = Vector([0, 0, 0, 0, 0, 1])
self.assertEqual(str(lowerCAmelCase_) , """(0,0,0,0,0,1)""")
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3, 4])
self.assertEqual(len(lowerCAmelCase_) , 4)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = Vector([1, 2])
lowercase_ = Vector([1, 2, 3, 4, 5])
lowercase_ = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
lowercase_ = Vector([1, -1, 1, -1, 2, -3, 4, -5])
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3)
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3)
self.assertEqual(z.euclidean_length() , 0)
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 1, 1])
self.assertEqual((x + y).component(0) , 2)
self.assertEqual((x + y).component(1) , 3)
self.assertEqual((x + y).component(2) , 4)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 1, 1])
self.assertEqual((x - y).component(0) , 0)
self.assertEqual((x - y).component(1) , 1)
self.assertEqual((x - y).component(2) , 2)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([2, -1, 4]) # for test of dot product
lowercase_ = Vector([1, -2, -1])
self.assertEqual(str(x * 3.0) , """(3.0,6.0,9.0)""")
self.assertEqual((a * b) , 0)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
self.assertEqual(str(zero_vector(1_0)).count("""0""") , 1_0)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1)) , """(0,1,0)""")
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 2, 3])
lowercase_ = Vector([1, 0, 1])
self.assertEqual(str(axpy(2 , lowerCAmelCase_ , lowerCAmelCase_)) , """(3,4,7)""")
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = Vector([1, 0, 0, 0, 0, 0])
lowercase_ = x.copy()
self.assertEqual(str(lowerCAmelCase_) , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Vector([1, 0, 0])
x.change_component(0 , 0)
x.change_component(1 , 1)
self.assertEqual(str(lowerCAmelCase_) , """(0,1,0)""")
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = [[-3, -1_4, -1_0], [-5, -1_0, -5], [-2, -1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(minors[x][y] , a.minor(lowerCAmelCase_ , lowerCAmelCase_))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = [[-3, 1_4, -1_0], [5, -1_0, 5], [-2, 1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(cofactors[x][y] , a.cofactor(lowerCAmelCase_ , lowerCAmelCase_))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual(-5 , a.determinant())
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3)
lowercase_ = Vector([1, 2, 3])
self.assertEqual("""(14,32,50)""" , str(a * x))
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2))
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
a.change_component(0 , 2 , 5)
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCAmelCase_))
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual(7 , a.component(2 , 1) , 0.01)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3)
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b))
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3)
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b))
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5)) , )
if __name__ == "__main__":
unittest.main()
| 313
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFRoFormerModel,
"""fill-mask""": TFRoFormerForMaskedLM,
"""question-answering""": TFRoFormerForQuestionAnswering,
"""text-classification""": TFRoFormerForSequenceClassification,
"""text-generation""": TFRoFormerForCausalLM,
"""token-classification""": TFRoFormerForTokenClassification,
"""zero-shot""": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2, 12, 16, 64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
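# Illustrative sketch (not part of the test file above): the rotation that
# apply_rotary_position_embeddings performs, reduced to NumPy. Each
# (even, odd) feature pair is rotated by a position-dependent angle.
# `apply_rotary` and all names below are hypothetical helpers, not the
# transformers API.
import numpy as np

def apply_rotary(x, sin, cos):
    # x: (seq_len, dim); sin, cos: (seq_len, dim // 2)
    x_even, x_odd = x[..., 0::2], x[..., 1::2]
    out = np.empty_like(x)
    out[..., 0::2] = x_even * cos - x_odd * sin  # rotate each pair ...
    out[..., 1::2] = x_odd * cos + x_even * sin  # ... by the position angle
    return out

seq_len, dim = 4, 8
positions = np.arange(seq_len)[:, None]
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))  # one frequency per pair
angles = positions * inv_freq
rotated = apply_rotary(np.ones((seq_len, dim)), np.sin(angles), np.cos(angles))
print(rotated.shape)  # (4, 8)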
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted

mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
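# A minimal usage sketch for the functions above: sort a small list in place
# and report how many comparisons the randomized pivots needed.
example = [3, 1, 4, 1, 5, 9, 2, 6]
comparisons = _in_place_quick_sort(example, 0, len(example) - 1)
print(example)      # [1, 1, 2, 3, 4, 5, 6, 9]
print(comparisons)  # varies from run to run because pivots are random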
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = 32

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
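# Illustrative sketch (standalone, not part of the test class above): the
# device-dependent seeding pattern used in get_dummy_inputs. The MPS backend
# has no per-device torch.Generator, so the global RNG is seeded instead.
# `make_generator` is a hypothetical helper name.
import torch

def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the default (CPU) generator
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=0)
print(torch.randn(2, generator=gen))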
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # the projection head is only used during self-supervised pre-training
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
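# Illustrative sketch (standalone): how the fused timm-style qkv projection of
# shape (3 * hidden_size, hidden_size) is sliced into separate query/key/value
# blocks, mirroring read_in_q_k_v above. Weights here are random placeholders.
import torch

hidden_size = 4
qkv_weight = torch.randn(3 * hidden_size, hidden_size)
query = qkv_weight[:hidden_size, :]
key = qkv_weight[hidden_size : hidden_size * 2, :]
value = qkv_weight[-hidden_size:, :]
assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)
print(query.shape, key.shape, value.shape)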
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4096,
"google/bigbird-roberta-large": 4096,
"google/bigbird-base-trivia-itc": 4096,
}
SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
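# Illustrative sketch (standalone, with made-up token ids): the special-token
# layout produced by the three methods above for a sequence pair.
# cls_id=65 and sep_id=66 are hypothetical values chosen for demonstration.
cls_id, sep_id = 65, 66
seq_a, seq_b = [10, 11, 12], [20, 21]

input_ids = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]
special_tokens_mask = [1] + [0] * len(seq_a) + [1] + [0] * len(seq_b) + [1]
token_type_ids = [0] * (len(seq_a) + 2) + [1] * (len(seq_b) + 1)

print(input_ids)            # [65, 10, 11, 12, 66, 20, 21, 66]
print(special_tokens_mask)  # [1, 0, 0, 0, 1, 0, 0, 1]
print(token_type_ids)       # [0, 0, 0, 0, 0, 1, 1, 1]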
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
"""simple docstring"""
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
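# The same trick on two made-up addends: Python integers are arbitrary
# precision, so the sum is exact and slicing its decimal string yields the
# leading digits.
a = 37107287533902102798797998220837590246510135740250
b = 46376937677490009712648124896970078050417018260538
print(str(a + b)[:10])  # first ten digits of the exact sum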
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
"""simple docstring"""
def capitalize_each_alpha(txt: str) -> list[str]:
    """Return every variant of ``txt`` with exactly one alphabetic character uppercased."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("doctest").testmod()
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
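

# Illustrative usage sketch (an addition, not part of the original test file): the same
# summarization flow as `test_pegasus_xsum_summary`, runnable directly. It relies only
# on APIs the tests above already exercise; the input text is arbitrary.
if __name__ == "__main__":
    model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
    tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    batch = tokenizer(
        ["PG&E scheduled the blackouts in response to forecasts for high winds."],
        return_tensors="np",
        truncation=True,
        padding=True,
    )
    summary_ids = model.generate(**batch, num_beams=2).sequences
    print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))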
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
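

# Minimal usage sketch (added for illustration, not in the original file): the default
# config has 4 stages, so the derived channel dimension is 96 * 2**(4 - 1) = 768.
if __name__ == "__main__":
    config = Swinv2Config()
    assert config.num_layers == 4
    assert config.hidden_size == 768
    print(config.model_type, config.hidden_size)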
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = None
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = True
lowercase__ = None
lowercase__ = 1
lowercase__ = None
lowercase__ = False
lowercase__ = None
lowercase__ = None
    def copy(self):
        # return a new instance whose fields are deep copies of this one's
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
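

# Worked example of the `copy` pattern above (hypothetical dataclass; the `Example`
# class and its fields are assumptions added for illustration):
if __name__ == "__main__":
    @dataclass
    class Example:
        name: Optional[str] = None
        tags: Optional[Dict[str, str]] = None

        def copy(self):
            return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

    a = Example(name="run", tags={"stage": "dev"})
    b = a.copy()
    b.tags["stage"] = "prod"
    assert a.tags["stage"] == "dev"  # deep copy: mutating `b` leaves `a` untouched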
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    """Encode/decode `sequence` with the Atbash cipher using explicit ASCII arithmetic."""
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            # uppercase: 'A' (65) and 'Z' (90) sum to 155
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            # lowercase: 'a' (97) and 'z' (122) sum to 219
            output += chr(219 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    """Encode/decode `sequence` with the Atbash cipher via a reversed-alphabet lookup."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )
def benchmark() -> None:
    """Compare the running time of `atbash_slow` and `atbash` on `string.printable`."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"{example} encrypted in atbash: {atbash(example)}")
benchmark()
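    # Added illustrative checks: 'A' (65) pairs with 'Z' (90), since 65 + 90 = 155 and
    # chr(155 - 65) == 'Z'; the cipher is its own inverse, so encoding twice round-trips.
    assert atbash("Hello") == "Svool"
    assert atbash(atbash("Hello")) == "Hello"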
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase_ = """post_processor"""
lowercase_ = getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_)
if tokenizer_component_instance:
lowercase_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ = tuple(state["""sep"""])
if "cls" in state:
lowercase_ = tuple(state["""cls"""])
lowercase_ = False
if state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space:
lowercase_ = add_prefix_space
lowercase_ = True
if state.get("""trim_offsets""" , lowerCAmelCase_) != trim_offsets:
lowercase_ = trim_offsets
lowercase_ = True
if changes_to_apply:
lowercase_ = getattr(lowerCAmelCase_ , state.pop("""type"""))
lowercase_ = component_class(**lowerCAmelCase_)
setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # the mask token behaves like a normal word, i.e. it absorbs the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
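

# Illustrative sketch (an addition; `allenai/led-base-16384` is a real checkpoint, the
# input strings are arbitrary): `_pad` above extends `global_attention_mask` with -1
# ("local attention"), so padded positions never become global tokens.
if __name__ == "__main__":
    tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    encoded = tokenizer(["short", "a slightly longer input sequence"])
    # mark the first token of each sequence as a global-attention token
    encoded["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in encoded["input_ids"]]
    padded = tokenizer.pad(encoded, padding="longest")
    print(padded["global_attention_mask"])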