| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __SCREAMING_SNAKE_CASE :
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
A__ : List[Any] = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A__ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A__ : List[str] = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
A__ : Dict = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=snake_case , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
A__ : Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
A__ : Union[str, Any] = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A__ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A__ : str = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
A__ : Optional[int] = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=snake_case , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
A__ : str = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
A__ : Any = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : str = self.get_dummy_components()
A__ : Optional[int] = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
A__ : Dict = self.get_dummy_inputs(snake_case )
A__ : List[Any] = inputs["""prompt"""]
A__ : int = inputs["""generator"""]
A__ : List[Any] = inputs["""num_inference_steps"""]
A__ : Any = inputs["""output_type"""]
if "image" in inputs:
A__ : Any = inputs["""image"""]
else:
A__ : Dict = None
if "mask_image" in inputs:
A__ : Optional[Any] = inputs["""mask_image"""]
else:
A__ : List[Any] = None
if "original_image" in inputs:
A__ : List[Any] = inputs["""original_image"""]
else:
A__ : Optional[int] = None
A__ , A__ : str = pipe.encode_prompt(snake_case )
# inputs with prompt converted to embeddings
A__ : List[Any] = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
A__ : Optional[int] = image
if mask_image is not None:
A__ : List[str] = mask_image
if original_image is not None:
A__ : Optional[Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(snake_case , snake_case , snake_case )
A__ : str = pipe(**snake_case )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(snake_case )
A__ : str = self.pipeline_class.from_pretrained(snake_case )
pipe_loaded.to(snake_case )
pipe_loaded.set_progress_bar_config(disable=snake_case )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(snake_case , snake_case ) is None , F'`{optional_component}` did not stay set to None after loading.' , )
A__ : Dict = self.get_dummy_inputs(snake_case )
A__ : Optional[int] = inputs["""generator"""]
A__ : Tuple = inputs["""num_inference_steps"""]
A__ : List[str] = inputs["""output_type"""]
# inputs with prompt converted to embeddings
A__ : Optional[Any] = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
A__ : Dict = image
if mask_image is not None:
A__ : Optional[Any] = mask_image
if original_image is not None:
A__ : Tuple = original_image
A__ : Tuple = pipe_loaded(**snake_case )[0]
A__ : Optional[Any] = np.abs(to_np(snake_case ) - to_np(snake_case ) ).max()
self.assertLess(snake_case , 1e-4 )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : Optional[int] = self.get_dummy_components()
A__ : List[Any] = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
A__ : Tuple = self.get_dummy_inputs(snake_case )
A__ : Dict = pipe(**snake_case )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(snake_case )
A__ : Optional[Any] = self.pipeline_class.from_pretrained(snake_case )
pipe_loaded.to(snake_case )
pipe_loaded.set_progress_bar_config(disable=snake_case )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
A__ : str = self.get_dummy_inputs(snake_case )
A__ : Tuple = pipe_loaded(**snake_case )[0]
A__ : Dict = np.abs(to_np(snake_case ) - to_np(snake_case ) ).max()
self.assertLess(snake_case , 1e-4 )
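# Usage sketch (illustrative; not part of the original file): a concrete test class
# is expected to provide `pipeline_class` and `get_dummy_inputs`, then delegate to
# the mixin helpers. `IFPipeline` below is one such pipeline class from diffusers;
# the class and method names in this sketch are our own.
#
#   class IFPipelineFastTests(IFPipelineTesterMixin, unittest.TestCase):
#       pipeline_class = IFPipeline
#
#       def get_dummy_components(self):
#           return self._get_dummy_components()
#
#       def test_save_load_optional_components(self):
#           self._test_save_load_optional_components()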
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
A_ = object()
# For specifying empty leaf dict `{}`
A_ = object()
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Any] ) ->Dict:
A__ : Union[str, Any] = tuple((re.compile(x + """$""" ) for x in qs) )
for i in range(len(UpperCAmelCase__ ) - len(UpperCAmelCase__ ) + 1 ):
A__ : Optional[Any] = [x.match(UpperCAmelCase__ ) for x, y in zip(UpperCAmelCase__, ks[i:] )]
if matches and all(UpperCAmelCase__ ):
return True
return False
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any] ) ->Dict:
def replace(UpperCAmelCase__ : int, UpperCAmelCase__ : List[str] ):
for rule, replacement in rules:
if _match(UpperCAmelCase__, UpperCAmelCase__ ):
return replacement
return val
return replace
def _lowerCAmelCase ( ) ->Tuple:
return [
# embeddings
(("transformer", "wpe", "embedding"), P("""mp""", UpperCAmelCase__ )),
(("transformer", "wte", "embedding"), P("""mp""", UpperCAmelCase__ )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCAmelCase__, """mp""" )),
(("attention", "out_proj", "kernel"), P("""mp""", UpperCAmelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCAmelCase__, """mp""" )),
(("mlp", "c_fc", "bias"), P("""mp""" )),
(("mlp", "c_proj", "kernel"), P("""mp""", UpperCAmelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple ) ->Any:
A__ : Union[str, Any] = _get_partition_rules()
A__ : int = _replacement_rules(UpperCAmelCase__ )
A__ : Tuple = {k: _unmatched for k in flatten_dict(UpperCAmelCase__ )}
A__ : Optional[int] = {k: replace(UpperCAmelCase__, UpperCAmelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCAmelCase__ ) )
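if __name__ == "__main__":
    # Minimal usage sketch (illustrative; not in the original module). The parameter
    # names and shapes below are invented to match the rules above: every leaf path
    # must be claimed by some rule, or the assert in `set_partitions` fires.
    import numpy as np

    params = {
        "transformer": {
            "wte": {"embedding": np.zeros((100, 8))},
            "wpe": {"embedding": np.zeros((16, 8))},
        }
    }
    # prints a FrozenDict mapping each leaf to its PartitionSpec, e.g. P("mp", None)
    print(set_partitions(params))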
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
A_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
A_ = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Any ) ->int:
for attribute in key.split(""".""" ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
A__ : Tuple = """lm_head"""
A__ : Optional[Any] = getattr(UpperCAmelCase__, UpperCAmelCase__ )
if weight_type is not None:
A__ : int = getattr(UpperCAmelCase__, UpperCAmelCase__ ).shape
else:
A__ : Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
A__ : Optional[Any] = value
elif weight_type == "weight_g":
A__ : int = value
elif weight_type == "weight_v":
A__ : Union[str, Any] = value
elif weight_type == "bias":
A__ : Tuple = value
else:
A__ : Optional[Any] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Any ) ->List[str]:
A__ : Optional[int] = []
A__ : List[str] = fairseq_model.state_dict()
A__ : Any = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
A__ : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, hf_model.config.feat_extract_norm == """group""", )
A__ : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
A__ : Optional[Any] = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
A__ : int = True
if "*" in mapped_key:
A__ : Any = name.split(UpperCAmelCase__ )[0].split(""".""" )[-2]
A__ : List[str] = mapped_key.replace("""*""", UpperCAmelCase__ )
if "weight_g" in name:
A__ : List[Any] = """weight_g"""
elif "weight_v" in name:
A__ : Dict = """weight_v"""
elif "bias" in name:
A__ : List[str] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A__ : Any = """weight"""
else:
A__ : List[str] = None
set_recursively(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase__ )
logger.warning(f'Unused weights: {unused_weights}' )
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Tuple ) ->int:
A__ : int = full_name.split("""conv_layers.""" )[-1]
A__ : Optional[Any] = name.split(""".""" )
A__ : List[Any] = int(items[0] )
A__ : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
A__ : Optional[int] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
A__ : List[Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
A__ : Optional[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
A__ : List[str] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(UpperCAmelCase__ )
@torch.no_grad()
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Tuple=None, UpperCAmelCase__ : str=None, UpperCAmelCase__ : Optional[int]=True ) ->Union[str, Any]:
if config_path is not None:
A__ : Optional[int] = UniSpeechConfig.from_pretrained(UpperCAmelCase__ )
else:
A__ : Any = UniSpeechConfig()
if is_finetuned:
if dict_path:
A__ : int = Dictionary.load_from_json(UpperCAmelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
A__ : Any = target_dict.pad_index
A__ : Union[str, Any] = target_dict.bos_index
A__ : Tuple = target_dict.eos_index
A__ : int = len(target_dict.symbols )
A__ : int = os.path.join(UpperCAmelCase__, """vocab.json""" )
if not os.path.isdir(UpperCAmelCase__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(UpperCAmelCase__ ) )
return
os.makedirs(UpperCAmelCase__, exist_ok=UpperCAmelCase__ )
A__ : str = target_dict.indices
# fairseq has the <pad> and <s> switched
A__ : List[str] = 4_2
A__ : List[str] = 4_3
with open(UpperCAmelCase__, """w""", encoding="""utf-8""" ) as vocab_handle:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
A__ : Union[str, Any] = WavaVecaPhonemeCTCTokenizer(
UpperCAmelCase__, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="""|""", do_lower_case=UpperCAmelCase__, )
A__ : Union[str, Any] = True if config.feat_extract_norm == """layer""" else False
A__ : Dict = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=1_6_0_0_0, padding_value=0, do_normalize=UpperCAmelCase__, return_attention_mask=UpperCAmelCase__, )
A__ : Optional[int] = WavaVecaProcessor(feature_extractor=UpperCAmelCase__, tokenizer=UpperCAmelCase__ )
processor.save_pretrained(UpperCAmelCase__ )
A__ : Any = UniSpeechForCTC(UpperCAmelCase__ )
else:
A__ : Union[str, Any] = UniSpeechForPreTraining(UpperCAmelCase__ )
if is_finetuned:
A__ , A__ , A__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} )
else:
A__ , A__ , A__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
A__ : str = model[0].eval()
recursively_load_weights(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
hf_unispeech.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
A_ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
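# Example invocation (illustrative; the script file name and all paths below are
# placeholders, not from the original source):
#
#   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/unispeech_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-converted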
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , snake_case : Tuple , snake_case : List[str]=2 , snake_case : List[str]=8 , snake_case : List[Any]=True , snake_case : Optional[Any]=True , snake_case : List[Any]=True , snake_case : Dict=True , snake_case : Tuple=99 , snake_case : Dict=16 , snake_case : Dict=5 , snake_case : int=2 , snake_case : Any=36 , snake_case : str="gelu" , snake_case : Dict=0.0 , snake_case : List[Any]=0.0 , snake_case : int=512 , snake_case : List[Any]=16 , snake_case : Tuple=2 , snake_case : Any=0.02 , snake_case : Optional[Any]=3 , snake_case : List[Any]=4 , snake_case : str=None , ):
'''simple docstring'''
A__ : Union[str, Any] = parent
A__ : Optional[Any] = batch_size
A__ : Dict = seq_length
A__ : str = is_training
A__ : Tuple = use_input_mask
A__ : Dict = use_token_type_ids
A__ : Dict = use_labels
A__ : int = vocab_size
A__ : List[str] = hidden_size
A__ : Union[str, Any] = num_hidden_layers
A__ : int = num_attention_heads
A__ : List[str] = intermediate_size
A__ : int = hidden_act
A__ : str = hidden_dropout_prob
A__ : Tuple = attention_probs_dropout_prob
A__ : Any = max_position_embeddings
A__ : Optional[int] = type_vocab_size
A__ : int = type_sequence_label_size
A__ : Optional[Any] = initializer_range
A__ : int = num_labels
A__ : Optional[int] = num_choices
A__ : Optional[int] = scope
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Any = None
if self.use_input_mask:
A__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Optional[int] = None
if self.use_token_type_ids:
A__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : Dict = None
A__ : List[str] = None
A__ : Union[str, Any] = None
if self.use_labels:
A__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Any = ids_tensor([self.batch_size] , self.num_choices )
A__ : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Any = self.get_config()
A__ : List[str] = 300
return config
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) : Tuple = self.prepare_config_and_inputs()
A__ : List[str] = True
A__ : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A__ : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _UpperCamelCase ( self : Any , snake_case : Any , snake_case : Tuple , snake_case : Any , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Dict ):
'''simple docstring'''
A__ : List[str] = MraModel(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Dict = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
A__ : List[str] = model(snake_case , token_type_ids=snake_case )
A__ : Union[str, Any] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Optional[Any] , snake_case : List[Any] , snake_case : Any , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Dict , snake_case : str , snake_case : Dict , snake_case : str , ):
'''simple docstring'''
A__ : Dict = True
A__ : Optional[Any] = MraModel(snake_case )
model.to(snake_case )
model.eval()
A__ : Union[str, Any] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
A__ : str = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , encoder_hidden_states=snake_case , )
A__ : Optional[int] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : int , snake_case : str , snake_case : Union[str, Any] , snake_case : Dict , snake_case : List[str] ):
'''simple docstring'''
A__ : Union[str, Any] = MraForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
A__ : List[Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Dict , snake_case : Dict , snake_case : Dict , snake_case : List[str] , snake_case : List[str] , snake_case : Tuple , snake_case : Union[str, Any] ):
'''simple docstring'''
A__ : Dict = MraForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : Tuple , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Optional[int] , snake_case : List[str] , snake_case : Union[str, Any] ):
'''simple docstring'''
A__ : str = self.num_labels
A__ : Optional[Any] = MraForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : Union[str, Any] , snake_case : Dict , snake_case : str , snake_case : List[Any] , snake_case : Any , snake_case : Dict , snake_case : Tuple , snake_case : Optional[Any] ):
'''simple docstring'''
A__ : str = self.num_labels
A__ : Union[str, Any] = MraForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : Tuple , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : int , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Dict , snake_case : Optional[Any] ):
'''simple docstring'''
A__ : List[str] = self.num_choices
A__ : str = MraForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
A__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : str = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : List[str] = self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) : Dict = config_and_inputs
A__ : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase ):
snake_case_ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = ()
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Optional[Any] = MraModelTester(self )
A__ : List[str] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ : List[str] = type
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
@slow
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : str = MraModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@unittest.skip(reason="""MRA does not output attentions""" )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
return
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : str = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
A__ : Any = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
A__ : List[Any] = model(snake_case )[0]
A__ : List[Any] = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , snake_case )
A__ : int = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : Union[str, Any] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
A__ : Tuple = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
A__ : List[Any] = model(snake_case )[0]
A__ : Dict = 5_0265
A__ : List[str] = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , snake_case )
A__ : List[Any] = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : Any = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
A__ : List[Any] = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
A__ : List[Any] = model(snake_case )[0]
A__ : Union[str, Any] = 5_0265
A__ : Optional[Any] = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , snake_case )
A__ : Optional[int] = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
"""simple docstring"""
def _lowerCAmelCase ( UpperCAmelCase__ : list, UpperCAmelCase__ : list, UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : int ) ->int:
if index == number_of_items:
return 0
A__ : Dict = 0
A__ : Dict = 0
A__ : Any = knapsack(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, index + 1 )
if weights[index] <= max_weight:
A__ : List[Any] = values[index] + knapsack(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, max_weight - weights[index], index + 1 )
return max(UpperCAmelCase__, UpperCAmelCase__ )
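# Illustrative addition (not part of the original file): the plain recursion above
# is O(2^n) in the worst case. A memoized variant keyed on (index, remaining
# capacity) brings this down to O(n * max_weight) for integer weights; the names
# below are our own.
from functools import lru_cache


def knapsack_memoized(weights: list, values: list, max_weight: int) -> int:
    @lru_cache(maxsize=None)
    def best(index: int, remaining: int) -> int:
        if index == len(weights):
            return 0
        # skip the current item
        skip = best(index + 1, remaining)
        # take the current item, if it still fits
        take = 0
        if weights[index] <= remaining:
            take = values[index] + best(index + 1, remaining - weights[index])
        return max(skip, take)

    return best(0, max_weight)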
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
A_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
A_ = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
A_ = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def _UpperCamelCase ( self : List[str] , snake_case : Dict , snake_case : List[Any] , snake_case : List[str]=None , snake_case : List[Any]="uniform_average" , snake_case : int=True ):
'''simple docstring'''
A__ : Optional[int] = mean_squared_error(
snake_case , snake_case , sample_weight=snake_case , multioutput=snake_case , squared=snake_case )
return {"mse": mse}
"""simple docstring"""
import string
def _lowerCAmelCase ( UpperCAmelCase__ : str ) ->None:
for key in range(len(string.ascii_uppercase ) ):
A__ : Optional[Any] = """"""
for symbol in message:
if symbol in string.ascii_uppercase:
A__ : List[Any] = string.ascii_uppercase.find(UpperCAmelCase__ )
A__ : List[str] = num - key
if num < 0:
A__ : int = num + len(string.ascii_uppercase )
A__ : List[Any] = translated + string.ascii_uppercase[num]
else:
A__ : str = translated + symbol
print(f'Decryption using Key #{key}: {translated}' )
def _lowerCAmelCase ( ) ->None:
A__ : List[str] = input("""Encrypted message: """ )
A__ : str = message.upper()
decrypt(UpperCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
A_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
def __init__( self : Optional[int] , snake_case : List[str]=None , **snake_case : Any ):
'''simple docstring'''
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" , snake_case , )
super().__init__(args=snake_case , **snake_case )
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , snake_case : str , snake_case : List[Any]=13 , snake_case : Union[str, Any]=7 , snake_case : Optional[Any]=True , snake_case : str=True , snake_case : Dict=False , snake_case : Union[str, Any]=True , snake_case : Optional[Any]=99 , snake_case : str=32 , snake_case : Tuple=5 , snake_case : List[str]=4 , snake_case : Optional[int]=37 , snake_case : str="gelu" , snake_case : Tuple=0.1 , snake_case : Optional[int]=0.1 , snake_case : int=512 , snake_case : List[str]=16 , snake_case : str=2 , snake_case : Optional[int]=0.02 , snake_case : str=3 , snake_case : Dict=4 , snake_case : Optional[Any]=None , ):
'''simple docstring'''
A__ : int = parent
A__ : Union[str, Any] = batch_size
A__ : Optional[int] = seq_length
A__ : List[Any] = is_training
A__ : List[str] = use_input_mask
A__ : Optional[Any] = use_token_type_ids
A__ : List[Any] = use_labels
A__ : Union[str, Any] = vocab_size
A__ : List[Any] = hidden_size
A__ : Any = num_hidden_layers
A__ : Any = num_attention_heads
A__ : Optional[int] = intermediate_size
A__ : Any = hidden_act
A__ : Tuple = hidden_dropout_prob
A__ : Dict = attention_probs_dropout_prob
A__ : Optional[int] = max_position_embeddings
A__ : Tuple = type_vocab_size
A__ : Union[str, Any] = type_sequence_label_size
A__ : List[str] = initializer_range
A__ : Any = num_labels
A__ : Any = num_choices
A__ : int = scope
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = None
if self.use_input_mask:
A__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Union[str, Any] = None
if self.use_token_type_ids:
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : int = None
A__ : int = None
A__ : List[str] = None
if self.use_labels:
A__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
A__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Optional[int] , snake_case : Dict , snake_case : Any , snake_case : Dict , snake_case : Any , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Optional[Any] ):
'''simple docstring'''
A__ : Optional[Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
A__ : List[Any] = model(snake_case , attention_mask=snake_case )
A__ : Dict = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Optional[int] , snake_case : Dict , snake_case : Optional[int] , snake_case : List[str] , snake_case : str , snake_case : Optional[Any] , snake_case : List[str] , snake_case : List[Any] , snake_case : Tuple , snake_case : Optional[Any] , ):
'''simple docstring'''
A__ : List[str] = BioGptForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Any , snake_case : str , snake_case : Tuple , snake_case : int , snake_case : Optional[Any] , snake_case : Any , *snake_case : Dict ):
'''simple docstring'''
A__ : Union[str, Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
# create attention mask
A__ : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
A__ : Any = self.seq_length // 2
A__ : str = 0
# first forward pass
A__ , A__ : List[Any] = model(snake_case , attention_mask=snake_case ).to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : int = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
A__ : List[str] = ids_tensor((1,) , snake_case ).item() + 1
A__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
A__ : int = random_other_next_tokens
# append to next input_ids and attn_mask
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : List[Any] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=snake_case )] , dim=1 , )
# get two different outputs
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Optional[int] = model(snake_case , past_key_values=snake_case , attention_mask=snake_case )["""last_hidden_state"""]
# select random slice
A__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
A__ : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : str , snake_case : int , snake_case : Optional[Any] , *snake_case : str ):
'''simple docstring'''
A__ : Dict = BioGptModel(config=snake_case ).to(snake_case ).eval()
A__ : Tuple = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
# first forward pass
A__ : Dict = model(snake_case , attention_mask=snake_case , use_cache=snake_case )
A__ , A__ : List[Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
A__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : int = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : Optional[int] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A__ : Any = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , past_key_values=snake_case )[
"""last_hidden_state"""
]
# select random slice
A__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : Any , snake_case : Tuple , *snake_case : Union[str, Any] , snake_case : Union[str, Any]=False ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM(snake_case )
model.to(snake_case )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
A__ : Optional[Any] = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , *snake_case : Optional[int] ):
'''simple docstring'''
A__ : int = BioGptModel(snake_case )
A__ : Union[str, Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _UpperCamelCase ( self : Any , snake_case : Dict , snake_case : Tuple , snake_case : int , snake_case : Union[str, Any] , snake_case : Dict , *snake_case : Any ):
'''simple docstring'''
A__ : Union[str, Any] = self.num_labels
A__ : int = BioGptForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : List[str] = self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) : str = config_and_inputs
A__ : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
snake_case_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
snake_case_ = (BioGptForCausalLM,) if is_torch_available() else ()
snake_case_ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : List[str] = BioGptModelTester(self )
A__ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ : str = type
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*snake_case )
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*snake_case , gradient_checkpointing=snake_case )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*snake_case )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*snake_case )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*snake_case )
@slow
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
A__ : Optional[int] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = """left"""
# Define PAD Token = EOS Token = 50256
A__ : Optional[int] = tokenizer.eos_token
A__ : Dict = model.config.eos_token_id
# use different length sentences to test batching
A__ : Union[str, Any] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
A__ : List[str] = tokenizer(snake_case , return_tensors="""pt""" , padding=snake_case )
A__ : str = inputs["""input_ids"""].to(snake_case )
A__ : Dict = model.generate(
input_ids=snake_case , attention_mask=inputs["""attention_mask"""].to(snake_case ) , )
A__ : Optional[int] = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Any = model.generate(input_ids=snake_case )
A__ : List[str] = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
A__ : str = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Dict = model.generate(input_ids=snake_case , max_length=model.config.max_length - num_paddings )
A__ : Optional[Any] = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
A__ : List[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case )
A__ : str = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case )
A__ : Optional[int] = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(snake_case , [non_padded_sentence, padded_sentence] )
@slow
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Optional[Any] = BioGptModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Optional[int] = 3
A__ : List[Any] = input_dict["""input_ids"""]
A__ : Dict = input_ids.ne(1 ).to(snake_case )
A__ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A__ : Union[str, Any] = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : int = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ , A__ : str = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Any = 3
A__ : List[Any] = """multi_label_classification"""
A__ : Dict = input_dict["""input_ids"""]
A__ : Tuple = input_ids.ne(1 ).to(snake_case )
A__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A__ : Tuple = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : List[str] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Optional[Any] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
A__ : str = torch.tensor([[2, 4805, 9, 656, 21]] )
A__ : Dict = model(snake_case )[0]
A__ : Tuple = 4_2384
A__ : str = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , snake_case )
A__ : str = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : Tuple = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
torch.manual_seed(0 )
A__ : Tuple = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(snake_case )
A__ : Optional[int] = model.generate(
**snake_case , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=snake_case , )
A__ : Optional[int] = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case )
A__ : List[str] = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(snake_case , snake_case )
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
A_ = random.Random()
def _lowerCAmelCase ( UpperCAmelCase__ : Dict, UpperCAmelCase__ : Tuple=1.0, UpperCAmelCase__ : Optional[int]=None, UpperCAmelCase__ : str=None ) ->Union[str, Any]:
if rng is None:
A__ : Optional[int] = global_rng
A__ : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
"""simple docstring"""
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """
    Schur complement of the symmetric block matrix [[A, B], [B.T, C]],
    i.e. S = C - B.T @ A^{-1} @ B.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
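# For an invertible A, the block determinant factors as
#     det([[A, B], [B.T, C]]) = det(A) * det(S)
# where S is the Schur complement computed above; the first test below
# checks exactly this identity numerically.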
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
"""simple docstring"""
import numpy as np
class Cell:
    """
    A cell in the grid world.  `position` is an (x, y) tuple, `parent` is the
    cell we arrived from, and g/h/f are the usual A* costs.
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """
        Return the in-bounds neighbours of `cell` on an 8-connected grid.
        """
        neighbour_cords = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cords:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """
    A* search from `start` to `goal` on `world`, returning the path as a
    list of positions.
    """
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))

        if current == goal:
            break

        for n in world.get_neighbours(current):
            # skip neighbours that have already been expanded
            if n in _closed:
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g

            # skip if a cheaper copy of this cell is already queued
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
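# Note: the heuristic above is the *squared* Euclidean distance.  It is cheap
# to compute but overestimates the true cost on an 8-connected grid, so the
# search is not guaranteed to return a shortest path.  For unit-cost diagonal
# moves, the Chebyshev distance max(abs(x2 - x1), abs(y2 - y1)) would be an
# admissible alternative.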
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Mark the path in the grid, just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
A_ = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # standard COCO test image (two cats) used across the conversion scripts
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the timm model's weights into our DeiT structure.
    """
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : List[str] = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
A__ : Optional[int] = text_generator("""This is a test""" , do_sample=snake_case )
self.assertEqual(
snake_case , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
A__ : Tuple = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
snake_case , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
A__ : List[str] = text_generator("""This is a test""" , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case )
self.assertEqual(
snake_case , [
{"""generated_token_ids""": ANY(snake_case )},
{"""generated_token_ids""": ANY(snake_case )},
] , )
A__ : Tuple = text_generator.model.config.eos_token_id
A__ : str = """<pad>"""
A__ : Optional[int] = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , )
self.assertEqual(
snake_case , [
[
{"""generated_token_ids""": ANY(snake_case )},
{"""generated_token_ids""": ANY(snake_case )},
],
[
{"""generated_token_ids""": ANY(snake_case )},
{"""generated_token_ids""": ANY(snake_case )},
],
] , )
@require_tf
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : List[Any] = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
A__ : int = text_generator("""This is a test""" , do_sample=snake_case )
self.assertEqual(
snake_case , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
A__ : List[Any] = text_generator(["""This is a test""", """This is a second test"""] , do_sample=snake_case )
self.assertEqual(
snake_case , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , snake_case : List[Any] , snake_case : Tuple ):
'''simple docstring'''
A__ : Dict = TextGenerationPipeline(model=snake_case , tokenizer=snake_case )
return text_generator, ["This is a test", "Another test"]
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : List[Any] = """Hello I believe in"""
A__ : List[str] = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
A__ : Any = text_generator(snake_case )
self.assertEqual(
snake_case , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
A__ : Optional[int] = text_generator(snake_case , stop_sequence=""" fe""" )
self.assertEqual(snake_case , [{"""generated_text""": """Hello I believe in fe"""}] )
def _UpperCamelCase ( self : Any , snake_case : List[str] , snake_case : Optional[Any] ):
'''simple docstring'''
A__ : List[str] = text_generator.model
A__ : Any = text_generator.tokenizer
A__ : List[str] = text_generator("""This is a test""" )
self.assertEqual(snake_case , [{"""generated_text""": ANY(snake_case )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
A__ : Optional[int] = text_generator("""This is a test""" , return_full_text=snake_case )
self.assertEqual(snake_case , [{"""generated_text""": ANY(snake_case )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
A__ : Any = pipeline(task="""text-generation""" , model=snake_case , tokenizer=snake_case , return_full_text=snake_case )
A__ : Any = text_generator("""This is a test""" )
self.assertEqual(snake_case , [{"""generated_text""": ANY(snake_case )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
A__ : Tuple = text_generator("""This is a test""" , return_full_text=snake_case )
self.assertEqual(snake_case , [{"""generated_text""": ANY(snake_case )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
A__ : Dict = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{"""generated_text""": ANY(snake_case )}, {"""generated_text""": ANY(snake_case )}],
[{"""generated_text""": ANY(snake_case )}, {"""generated_text""": ANY(snake_case )}],
] , )
if text_generator.tokenizer.pad_token is not None:
A__ : Optional[int] = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{"""generated_text""": ANY(snake_case )}, {"""generated_text""": ANY(snake_case )}],
[{"""generated_text""": ANY(snake_case )}, {"""generated_text""": ANY(snake_case )}],
] , )
with self.assertRaises(snake_case ):
A__ : Tuple = text_generator("""test""" , return_full_text=snake_case , return_text=snake_case )
with self.assertRaises(snake_case ):
A__ : Optional[Any] = text_generator("""test""" , return_full_text=snake_case , return_tensors=snake_case )
with self.assertRaises(snake_case ):
A__ : str = text_generator("""test""" , return_text=snake_case , return_tensors=snake_case )
        # Empty prompt is slightly special:
        # it requires the BOS token to exist.
        # Special case for Pegasus, which will always append EOS and so will
        # work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
A__ : int = text_generator("""""" )
self.assertEqual(snake_case , [{"""generated_text""": ANY(snake_case )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
A__ : List[Any] = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 500 , max_new_tokens=20 )
A__ : Any = text_generator("""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(snake_case ):
text_generator(
"""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def _UpperCamelCase ( self : str ):
'''simple docstring'''
import torch
# Classic `model_kwargs`
        pipe = pipeline(
            model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloat16} , )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
A__ : Optional[Any] = pipe("""This is a test""" )
self.assertEqual(
snake_case , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloat16 )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
A__ : str = pipe("""This is a test""" )
self.assertEqual(
snake_case , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
A__ : int = pipe("""This is a test""" )
self.assertEqual(
snake_case , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
import torch
        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.float16 )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
import torch
        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.float16 )
pipe("""This is a test""" , do_sample=snake_case , top_p=0.5 )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : Union[str, Any] = """Hello world"""
A__ : Any = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
if text_generator.model.framework == "tf":
A__ : List[str] = logging.get_logger("""transformers.generation.tf_utils""" )
else:
A__ : List[str] = logging.get_logger("""transformers.generation.utils""" )
A__ : List[Any] = """Both `max_new_tokens`""" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(snake_case ) as cl:
A__ : str = text_generator(snake_case , max_length=10 , max_new_tokens=1 )
self.assertIn(snake_case , cl.out )
# The user only sets one -> no warning
with CaptureLogger(snake_case ) as cl:
A__ : Optional[int] = text_generator(snake_case , max_new_tokens=1 )
self.assertNotIn(snake_case , cl.out )
with CaptureLogger(snake_case ) as cl:
A__ : Dict = text_generator(snake_case , max_length=10 )
self.assertNotIn(snake_case , cl.out )
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Divide-and-conquer maximum subarray: returns (start, end, sum)."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
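# Quick sanity check (the classic CLRS example; the maximum subarray is
# arr[7:11] = [18, 20, -7, 12] with sum 43):
#     >>> arr = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
#     >>> max_subarray(arr, 0, len(arr) - 1)
#     (7, 10, 43)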
def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Best subarray that crosses the midpoint: scan left and right from `mid`."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start
def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
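# `find_executable_batch_size` wraps a training function whose first argument
# is a batch size: it calls the function with `starting_batch_size`, catches
# CUDA out-of-memory failures, halves the batch size, and retries until the
# function succeeds or the batch size reaches zero.  The fake OOM above lets
# the tests below drive that retry loop without a GPU.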
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
"""simple docstring"""
from __future__ import annotations
class IIRFilter:
    """
    N-order IIR filter, implementing the difference equation

        a_0 * y[n] = sum_{i=0..k} b_i * x[n-i] - sum_{i=1..k} a_i * y[n-i]
    """

    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(msg)
        if len(b_coeffs) != self.order + 1:
            msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # shift the input/output histories by one sample
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
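# Minimal usage sketch.  The coefficients below are illustrative placeholders,
# not a carefully designed filter; in practice they would come from a
# filter-design routine (e.g. a 2nd-order Butterworth low-pass for a given
# cutoff frequency).
if __name__ == "__main__":
    filt = IIRFilter(2)
    filt.set_coefficients(
        [1.0, -1.1430, 0.4128],  # a_0 .. a_2 (assumed example values)
        [0.0675, 0.1349, 0.0675],  # b_0 .. b_2 (assumed example values)
    )
    # Feed a short ramp through the filter one sample at a time.
    print([round(filt.process(x), 4) for x in (0.0, 0.25, 0.5, 0.75, 1.0)])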
"""simple docstring"""
def get_demo_graph(index):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
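# Example (graph 0 above): the 0-1-2 and 5-6-7-8 cycles contain no bridges,
# while removing edge (2, 3), (3, 4) or (2, 5) disconnects the graph, so
# those three edges are exactly the bridges:
#     >>> sorted(compute_bridges(get_demo_graph(0)))
#     [(2, 3), (2, 5), (3, 4)]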
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : List[Any] = None
if self.use_input_mask:
A__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Union[str, Any] = None
if self.use_labels:
A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Tuple = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ , A__ , A__ , A__ : str = self.prepare_config_and_inputs()
A__ : Union[str, Any] = True
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self : Union[str, Any] , snake_case : Optional[int] , snake_case : List[str] , snake_case : int ):
'''simple docstring'''
A__ : Any = GPTNeoXModel(config=snake_case )
model.to(snake_case )
model.eval()
A__ : int = model(snake_case , attention_mask=snake_case )
A__ : Optional[int] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Union[str, Any] , snake_case : str , snake_case : Any , snake_case : Union[str, Any] ):
'''simple docstring'''
A__ : int = True
A__ : str = GPTNeoXModel(snake_case )
model.to(snake_case )
model.eval()
A__ : Tuple = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Dict , snake_case : List[Any] , snake_case : str , snake_case : Optional[Any] , snake_case : Any ):
'''simple docstring'''
A__ : Any = GPTNeoXForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Tuple = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : List[str] , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : Tuple ):
'''simple docstring'''
A__ : int = self.num_labels
A__ : int = GPTNeoXForQuestionAnswering(snake_case )
model.to(snake_case )
model.eval()
A__ : Optional[Any] = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : str , snake_case : Tuple , snake_case : int , snake_case : int , snake_case : Dict ):
'''simple docstring'''
A__ : List[Any] = self.num_labels
A__ : Tuple = GPTNeoXForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : List[str] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : Any , snake_case : Union[str, Any] , snake_case : int , snake_case : Tuple , snake_case : Any ):
'''simple docstring'''
A__ : Tuple = self.num_labels
A__ : Any = GPTNeoXForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : Dict = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : List[str] , snake_case : List[str] , snake_case : Tuple , snake_case : Any ):
'''simple docstring'''
A__ : Optional[int] = True
A__ : Any = GPTNeoXForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
# first forward pass
A__ : Tuple = model(snake_case , attention_mask=snake_case , use_cache=snake_case )
A__ : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A__ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
A__ : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
A__ : Tuple = model(snake_case , attention_mask=snake_case , output_hidden_states=snake_case )
A__ : List[Any] = output_from_no_past["""hidden_states"""][0]
A__ : List[str] = model(
snake_case , attention_mask=snake_case , past_key_values=snake_case , output_hidden_states=snake_case , )["""hidden_states"""][0]
# select random slice
A__ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : str = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ : Dict = config_and_inputs
A__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : Any = GPTNeoXModelTester(self )
A__ : Any = ConfigTester(self , config_class=snake_case , hidden_size=64 , num_attention_heads=8 )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ , A__ , A__ , A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(snake_case , snake_case , snake_case )
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ , A__ , A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(snake_case , snake_case , snake_case )
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ , A__ , A__ , A__ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
A__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(snake_case , snake_case , snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ , A__ , A__ , A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(snake_case , snake_case , snake_case )
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*snake_case )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Optional[Any] ):
'''simple docstring'''
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
A__ : List[Any] = ids_tensor([1, 10] , config.vocab_size )
A__ : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A__ : Union[str, Any] = GPTNeoXModel(snake_case )
original_model.to(snake_case )
original_model.eval()
A__ : Optional[int] = original_model(snake_case ).last_hidden_state
A__ : List[str] = original_model(snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A__ : Optional[int] = {"""type""": scaling_type, """factor""": 10.0}
A__ : Optional[int] = GPTNeoXModel(snake_case )
scaled_model.to(snake_case )
scaled_model.eval()
A__ : List[str] = scaled_model(snake_case ).last_hidden_state
A__ : Tuple = scaled_model(snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo was updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure["modeling_data2vec_text"] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure["modeling_data2vec_vision"] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
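# Note (added): with the lazy module in place, a statement such as
#     from transformers.models.data2vec import Data2VecTextModel
# resolves through _LazyModule and only imports the torch-backed submodule on
# first attribute access, keeping the initial `import transformers` cheap.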
| 296
|
"""simple docstring"""
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_5_0_0_0_0_0) -> int:
    # For each perimeter up to the limit, count how many Pythagorean triples
    # produce it, generating primitives via Euclid's formula (coprime m > n,
    # m - n odd) and stepping through their multiples.
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
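# Illustrative check of the Euclid parametrization used above (added, not part of
# the original solution): coprime m > n with m - n odd generate the primitive
# triple (m^2 - n^2, 2mn, m^2 + n^2), whose perimeter is 2m(m + n).
def _euclid_triple(m: int, n: int) -> tuple:
    return (m * m - n * n, 2 * m * n, m * m + n * n)
assert _euclid_triple(2, 1) == (3, 4, 5)  # perimeter 12 == 2 * 2 * (2 + 1)
assert _euclid_triple(3, 2) == (5, 12, 13)  # perimeter 30 == 2 * 3 * (3 + 2)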
if __name__ == "__main__":
print(F'{solution() = }')
| 296
| 1
|
"""simple docstring"""
from cv2 import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            img[i][j] = [2_5_5, 2_5_5, 2_5_5] - img[i][j]
    return img
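# A vectorized alternative (added for illustration): for a uint8 numpy image the
# nested loops above reduce to a single broadcast subtraction.
def convert_to_negative_fast(img):
    return 255 - img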
if __name__ == "__main__":
# read original image
    img = imread('''image_data/lena.jpg''', 1)
    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow('''negative of original image''', img)
waitKey(0)
destroyAllWindows()
| 296
|
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default
def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
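# Usage sketch (added; the environment variable names are made up):
#     os.environ["MY_APP_NUM_WORKERS"] = "4"
#     get_int_from_env(["MY_APP_NUM_WORKERS"], default=1)  # -> 4
#     parse_flag_from_env("MY_APP_DEBUG", default=False)   # -> False when unset
#     parse_choice_from_env("MY_APP_MODE", default="no")   # -> "no" when unset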
| 296
| 1
|
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig ):
    model_type = 'new-model'
if is_tf_available():
    class TFNewModel(TFBertModel ):
        config_class = NewModelConfig
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Union[str, Any] = """bert-base-cased"""
A__ : Any = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
A__ : Tuple = TFAutoModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : Union[str, Any] = """bert-base-cased"""
A__ : Any = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
A__ : List[str] = TFAutoModelForPreTraining.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Any = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
A__ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(snake_case )
A__ , A__ : Dict = TFAutoModelForCausalLM.from_pretrained(snake_case , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : List[Any] = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
A__ : Optional[int] = TFAutoModelWithLMHead.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : List[str] = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
A__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(snake_case )
A__ , A__ : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(snake_case , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : List[str] = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
            A__ : Optional[int] = TFAutoModelForSeq2SeqLM.from_pretrained(snake_case )
            A__ , A__ : Dict = TFAutoModelForSeq2SeqLM.from_pretrained(snake_case , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
A__ : List[Any] = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
A__ : Tuple = TFAutoModelForSequenceClassification.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def _UpperCamelCase ( self : str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
A__ : Optional[int] = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
A__ : Optional[int] = TFAutoModelForQuestionAnswering.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
@require_tensorflow_probability
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
A__ : int = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
A__ : Optional[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(snake_case )
A__ , A__ : Optional[int] = TFAutoModelForTableQuestionAnswering.from_pretrained(
snake_case , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=snake_case ) , 1_4410 )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=snake_case ) , 1_4410 )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : int = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
self.assertIsInstance(snake_case , snake_case )
A__ : Any = copy.deepcopy(model.config )
A__ : Dict = ["""FunnelBaseModel"""]
A__ : List[str] = TFAutoModel.from_config(snake_case )
self.assertIsInstance(snake_case , snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir )
            model = TFAutoModel.from_pretrained(tmp_dir )
self.assertIsInstance(snake_case , snake_case )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
try:
AutoConfig.register("""new-model""" , snake_case )
            auto_classes = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
                    with self.assertRaises(ValueError ):
                        auto_class.register(BertConfig , TFNewModel )
                    auto_class.register(NewModelConfig , TFNewModel )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError ):
                        auto_class.register(BertConfig , TFBertModel )
# Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self ).get_config()
                    config = NewModelConfig(**tiny_config.to_dict() )
                    model = auto_class.from_config(config )
                    self.assertIsInstance(snake_case , snake_case )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir )
                        new_model = auto_class.from_pretrained(tmp_dir )
self.assertIsInstance(snake_case , snake_case )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case , """bert-base is not a local folder and is not a valid model identifier""" ):
A__ : Optional[int] = TFAutoModel.from_pretrained("""bert-base""" )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            A__ : List[Any] = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="""aaaaaa""" )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case , """hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" , ):
A__ : Any = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(snake_case , """Use `from_pt=True` to load this model""" ):
A__ : Optional[int] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Dict = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A__ : List[str] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
A__ : str = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
A__ : List[Any] = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 296
|
"""simple docstring"""
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        '''simple docstring'''
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""" )
def __str__( self : List[Any] ):
'''simple docstring'''
return str(self.k )
    def detect(self, img_path: str):
        '''simple docstring'''
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect('''path_to_image''')
    cv2.imwrite('''detect.png''', color_img)
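    # For comparison (added sketch; parameters illustrative), OpenCV's built-in
    # detector returns the Harris response r for every pixel in one call:
    #     gray = np.float32(cv2.imread("path_to_image", 0))
    #     response = cv2.cornerHarris(gray, blockSize=3, ksize=3, k=0.04)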
| 296
| 1
|
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps, beta_schedule="scaled_linear", beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=False, )
    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
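# Example invocation (added; file names are placeholders):
#     python convert_ldm_original.py --checkpoint_path model.ckpt \
#         --config_path config.yaml --output_path ./ldm_pipeline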
| 296
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
A_ = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ObjectDetectionPipeline(Pipeline ):
def __init__( self : str , *snake_case : Tuple , **snake_case : Tuple ):
'''simple docstring'''
super().__init__(*snake_case , **snake_case )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , """vision""" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters(self, **kwargs):
        '''simple docstring'''
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
def __call__( self : Tuple , *snake_case : Union[str, Any] , **snake_case : Union[str, Any] ):
'''simple docstring'''
return super().__call__(*snake_case , **snake_case )
    def preprocess(self, image):
        '''simple docstring'''
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        '''simple docstring'''
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        '''simple docstring'''
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()
            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )
            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
return annotation
def _UpperCamelCase ( self : Union[str, Any] , snake_case : "torch.Tensor" ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
A__ , A__ , A__ , A__ : Any = box.int().tolist()
A__ : Any = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
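# Usage sketch (added; checkpoint name illustrative) via the high-level factory,
# which instantiates the pipeline class above:
#     from transformers import pipeline
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     detector("cats.png", threshold=0.9)
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]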
| 296
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput ):
    latent_dist: DiagonalGaussianDistribution
class AutoencoderKL(ModelMixin , ConfigMixin ):
    _supports_gradient_checkpointing = True
@register_to_config
    def __init__( self , in_channels: int = 3 , out_channels: int = 3 , down_block_types: Tuple[str] = ("DownEncoderBlock2D",) , up_block_types: Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels: Tuple[int] = (64,) , layers_per_block: int = 1 , act_fn: str = "silu" , latent_channels: int = 4 , norm_num_groups: int = 32 , sample_size: int = 32 , scaling_factor: float = 0.18215 , ):
'''simple docstring'''
super().__init__()
# pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=True , )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , norm_num_groups=norm_num_groups , act_fn=act_fn , )
        self.quant_conv = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        self.post_quant_conv = nn.Conv2d(latent_channels , latent_channels , 1 )
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size , (list, tuple) )
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        '''simple docstring'''
        if isinstance(module , (Encoder, Decoder) ):
            module.gradient_checkpointing = value
    def enable_tiling(self, use_tiling: bool = True):
        '''simple docstring'''
        self.use_tiling = use_tiling
    def disable_tiling(self):
        '''simple docstring'''
        self.enable_tiling(False )
    def enable_slicing(self):
        '''simple docstring'''
        self.use_slicing = True
    def disable_slicing(self):
        '''simple docstring'''
        self.use_slicing = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        '''simple docstring'''
        processors = {}
        def fn_recursive_add_processors(name: str , module: torch.nn.Module , processors: Dict[str, AttentionProcessor] ):
            if hasattr(module , """set_processor""" ):
                processors[F'{name}.processor'] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F'{name}.{sub_name}' , child , processors )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )
        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
        '''simple docstring'''
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                F'A dict of processors was passed, but the number of processors {len(processor )} does not match the'
                F' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
        def fn_recursive_attn_processor(name: str , module: torch.nn.Module , processor ):
            if hasattr(module , """set_processor""" ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(F'{name}.processor' ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F'{name}.{sub_name}' , child , processor )
        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )
    def set_default_attn_processor(self):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        '''simple docstring'''
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x , return_dict=return_dict )
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice ) for x_slice in x.split(1 )]
            h = torch.cat(encoded_slices )
        else:
            h = self.encoder(x )
        moments = self.quant_conv(h )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
    def _decode(self, z: torch.FloatTensor, return_dict: bool = True):
        '''simple docstring'''
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z , return_dict=return_dict )
        z = self.post_quant_conv(z )
        dec = self.decoder(z )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
@apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True):
        '''simple docstring'''
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
            decoded = torch.cat(decoded_slices )
        else:
            decoded = self._decode(z ).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded )
    def blend_v(self, a, b, blend_extent):
        '''simple docstring'''
        blend_extent = min(a.shape[2] , b.shape[2] , blend_extent )
        for y in range(blend_extent ):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b
    def blend_h(self, a, b, blend_extent):
        '''simple docstring'''
        blend_extent = min(a.shape[3] , b.shape[3] , blend_extent )
        for x in range(blend_extent ):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True):
        '''simple docstring'''
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor )
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0 , x.shape[2] , overlap_size ):
            row = []
            for j in range(0 , x.shape[3] , overlap_size ):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile )
                tile = self.quant_conv(tile )
                row.append(tile )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        moments = torch.cat(result_rows , dim=2 )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True):
        '''simple docstring'''
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor )
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0 , z.shape[2] , overlap_size ):
            row = []
            for j in range(0 , z.shape[3] , overlap_size ):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile )
                decoded = self.decoder(tile )
                row.append(decoded )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        dec = torch.cat(result_rows , dim=2 )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward(self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None, ):
        '''simple docstring'''
        x = sample
        posterior = self.encode(x ).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator )
        else:
            z = posterior.mode()
        dec = self.decode(z ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
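# Usage sketch (added; checkpoint name illustrative): tiled encode/decode keeps
# memory roughly constant on large inputs, with blend_v/blend_h smoothing the
# seams between neighbouring tiles.
#     from diffusers import AutoencoderKL
#     vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
#     vae.enable_tiling()
#     posterior = vae.encode(pixel_values).latent_dist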
| 296
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
A_ = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class TableTransformerConfig(PretrainedConfig ):
    model_type = 'table-transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A__ : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(snake_case , snake_case ):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
# set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
A__ : Tuple = use_timm_backbone
A__ : str = backbone_config
A__ : str = num_channels
A__ : List[Any] = num_queries
A__ : Optional[Any] = d_model
A__ : Tuple = encoder_ffn_dim
A__ : Union[str, Any] = encoder_layers
A__ : List[Any] = encoder_attention_heads
A__ : Optional[int] = decoder_ffn_dim
A__ : Any = decoder_layers
A__ : int = decoder_attention_heads
A__ : Any = dropout
A__ : Dict = attention_dropout
A__ : Dict = activation_dropout
A__ : Tuple = activation_function
A__ : List[str] = init_std
A__ : List[str] = init_xavier_std
A__ : Any = encoder_layerdrop
A__ : Optional[Any] = decoder_layerdrop
A__ : Union[str, Any] = encoder_layers
A__ : Dict = auxiliary_loss
A__ : List[Any] = position_embedding_type
A__ : Optional[Any] = backbone
A__ : str = use_pretrained_backbone
A__ : Union[str, Any] = dilation
# Hungarian matcher
A__ : Tuple = class_cost
A__ : Optional[Any] = bbox_cost
A__ : Dict = giou_cost
# Loss coefficients
A__ : Any = mask_loss_coefficient
A__ : str = dice_loss_coefficient
A__ : str = bbox_loss_coefficient
A__ : Union[str, Any] = giou_loss_coefficient
A__ : List[str] = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def num_attention_heads( self ):
'''simple docstring'''
return self.encoder_attention_heads
@property
    def hidden_size( self ):
'''simple docstring'''
return self.d_model
class TableTransformerOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
@property
    def inputs( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
    def atol_for_validation( self ):
'''simple docstring'''
return 1e-5
@property
    def default_onnx_opset( self ):
'''simple docstring'''
return 12
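# Usage sketch (added): a default configuration yields a randomly initialized
# model with the ResNet-50 backbone configured above.
#     from transformers import TableTransformerConfig, TableTransformerModel
#     config = TableTransformerConfig()
#     model = TableTransformerModel(config)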
| 296
| 1
|
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
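# The same top-k mask filling via the high-level API, for comparison (added sketch):
#     from transformers import pipeline
#     fill = pipeline("fill-mask", model="camembert-base", top_k=3)
#     fill("Le camembert est <mask> :)")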
| 296
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool ):
    default_checkpoint = 'Salesforce/blip-image-captioning-base'
    description = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    name = 'image_captioner'
    model_class = AutoModelForVision2Seq
    inputs = ['image']
    outputs = ['text']
def __init__( self : int , *snake_case : Optional[int] , **snake_case : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*snake_case , **snake_case )
def _UpperCamelCase ( self : int , snake_case : "Image" ):
'''simple docstring'''
return self.pre_processor(images=snake_case , return_tensors="""pt""" )
    def forward(self, inputs):
        '''simple docstring'''
        return self.model.generate(**inputs )
    def decode(self, outputs):
        '''simple docstring'''
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
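# Usage sketch (added; the image path is illustrative): PipelineTool instances
# are callable and chain the encode -> forward -> decode methods defined above.
#     from PIL import Image
#     captioner = ImageCaptioningTool()
#     caption = captioner(Image.open("photo.jpg"))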
| 296
| 1
|
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text between `start_prompt` and `end_prompt`, trimming empty boundary lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    """Return the list of models supporting `task_guide`, formatted as documentation links."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f'[{name}](../model_doc/{code})' for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Compare the model list in `task_guide` with the up-to-date one and optionally rewrite it."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""", end_prompt="""<!--End of the generated tip-->""", )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), """w""", encoding="""utf-8""", newline="""\n""" ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
                """ to fix this.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
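# Typical invocations (from the repository root, per the comment at the top of
# this script):
#     python utils/check_task_guides.py                      # check only; raises on drift
#     python utils/check_task_guides.py --fix_and_overwrite  # rewrite the guides in place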
| 296
|
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module ):
    def __init__(self):
        '''simple docstring'''
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward(self, x):
        '''simple docstring'''
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : int = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(snake_case , model.state_dict() )
A__ : List[str] = os.path.join(snake_case , """index.json""" )
self.assertTrue(os.path.isfile(snake_case ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
A__ : List[str] = os.path.join(snake_case , F'{key}.dat' )
self.assertTrue(os.path.isfile(snake_case ) )
# TODO: add tests on the fact weights are properly loaded
def _UpperCamelCase ( self : int ):
'''simple docstring'''
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
for dtype in dtypes:
A__ : str = torch.randn(2 , 3 , dtype=snake_case )
with TemporaryDirectory() as tmp_dir:
A__ : List[str] = offload_weight(snake_case , """weight""" , snake_case , {} )
A__ : Union[str, Any] = os.path.join(snake_case , """weight.dat""" )
self.assertTrue(os.path.isfile(snake_case ) )
self.assertDictEqual(snake_case , {"""weight""": {"""shape""": [2, 3], """dtype""": str(snake_case ).split(""".""" )[1]}} )
A__ : str = load_offloaded_weight(snake_case , index["""weight"""] )
self.assertTrue(torch.equal(snake_case , snake_case ) )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : str = ModelForTest()
A__ : Union[str, Any] = model.state_dict()
A__ : Optional[int] = {k: v for k, v in state_dict.items() if """linear2""" not in k}
A__ : List[Any] = {k: v for k, v in state_dict.items() if """linear2""" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(snake_case , snake_case )
A__ : Dict = OffloadedWeightsLoader(state_dict=snake_case , save_folder=snake_case )
# Every key is there with the right value
self.assertEqual(sorted(snake_case ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(snake_case , weight_map[key] ) )
A__ : int = {k: v for k, v in state_dict.items() if """weight""" in k}
A__ : Tuple = {k: v for k, v in state_dict.items() if """weight""" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(snake_case , snake_case )
A__ : Optional[Any] = OffloadedWeightsLoader(state_dict=snake_case , save_folder=snake_case )
# Every key is there with the right value
self.assertEqual(sorted(snake_case ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(snake_case , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(snake_case , snake_case )
# Duplicates are removed
A__ : int = OffloadedWeightsLoader(state_dict=snake_case , save_folder=snake_case )
# Every key is there with the right value
self.assertEqual(sorted(snake_case ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(snake_case , weight_map[key] ) )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : List[str] = {"""a.1""": 0, """a.10""": 1, """a.2""": 2}
A__ : str = extract_submodules_state_dict(snake_case , ["""a.1""", """a.2"""] )
self.assertDictEqual(snake_case , {"""a.1""": 0, """a.2""": 2} )
A__ : Dict = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2}
A__ : int = extract_submodules_state_dict(snake_case , ["""a.1""", """a.2"""] )
self.assertDictEqual(snake_case , {"""a.1.a""": 0, """a.2.a""": 2} )
| 296
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 296
|
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , snake_case : str , snake_case : List[Any]=13 , snake_case : Union[str, Any]=7 , snake_case : Optional[Any]=True , snake_case : str=True , snake_case : Dict=False , snake_case : Union[str, Any]=True , snake_case : Optional[Any]=99 , snake_case : str=32 , snake_case : Tuple=5 , snake_case : List[str]=4 , snake_case : Optional[int]=37 , snake_case : str="gelu" , snake_case : Tuple=0.1 , snake_case : Optional[int]=0.1 , snake_case : int=512 , snake_case : List[str]=16 , snake_case : str=2 , snake_case : Optional[int]=0.02 , snake_case : str=3 , snake_case : Dict=4 , snake_case : Optional[Any]=None , ):
'''simple docstring'''
A__ : int = parent
A__ : Union[str, Any] = batch_size
A__ : Optional[int] = seq_length
A__ : List[Any] = is_training
A__ : List[str] = use_input_mask
A__ : Optional[Any] = use_token_type_ids
A__ : List[Any] = use_labels
A__ : Union[str, Any] = vocab_size
A__ : List[Any] = hidden_size
A__ : Any = num_hidden_layers
A__ : Any = num_attention_heads
A__ : Optional[int] = intermediate_size
A__ : Any = hidden_act
A__ : Tuple = hidden_dropout_prob
A__ : Dict = attention_probs_dropout_prob
A__ : Optional[int] = max_position_embeddings
A__ : Tuple = type_vocab_size
A__ : Union[str, Any] = type_sequence_label_size
A__ : List[str] = initializer_range
A__ : Any = num_labels
A__ : Any = num_choices
A__ : int = scope
    def prepare_config_and_inputs( self ):
'''simple docstring'''
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = None
if self.use_input_mask:
A__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Union[str, Any] = None
if self.use_token_type_ids:
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : int = None
A__ : int = None
A__ : List[str] = None
if self.use_labels:
A__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
A__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Optional[int] , snake_case : Dict , snake_case : Any , snake_case : Dict , snake_case : Any , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Optional[Any] ):
'''simple docstring'''
A__ : Optional[Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
A__ : List[Any] = model(snake_case , attention_mask=snake_case )
A__ : Dict = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Optional[int] , snake_case : Dict , snake_case : Optional[int] , snake_case : List[str] , snake_case : str , snake_case : Optional[Any] , snake_case : List[str] , snake_case : List[Any] , snake_case : Tuple , snake_case : Optional[Any] , ):
'''simple docstring'''
A__ : List[str] = BioGptForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Any , snake_case : str , snake_case : Tuple , snake_case : int , snake_case : Optional[Any] , snake_case : Any , *snake_case : Dict ):
'''simple docstring'''
A__ : Union[str, Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
# create attention mask
A__ : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
A__ : Any = self.seq_length // 2
A__ : str = 0
# first forward pass
A__ , A__ : List[Any] = model(snake_case , attention_mask=snake_case ).to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : int = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
A__ : List[str] = ids_tensor((1,) , snake_case ).item() + 1
A__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
A__ : int = random_other_next_tokens
# append to next input_ids and attn_mask
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : List[Any] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=snake_case )] , dim=1 , )
# get two different outputs
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Optional[int] = model(snake_case , past_key_values=snake_case , attention_mask=snake_case )["""last_hidden_state"""]
# select random slice
A__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
A__ : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : str , snake_case : int , snake_case : Optional[Any] , *snake_case : str ):
'''simple docstring'''
A__ : Dict = BioGptModel(config=snake_case ).to(snake_case ).eval()
A__ : Tuple = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
# first forward pass
A__ : Dict = model(snake_case , attention_mask=snake_case , use_cache=snake_case )
A__ , A__ : List[Any] = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend them to next_input_ids
A__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : int = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention mask
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : Optional[int] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A__ : Any = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , past_key_values=snake_case )[
"""last_hidden_state"""
]
# select random slice
A__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : Any , snake_case : Tuple , *snake_case : Union[str, Any] , snake_case : Union[str, Any]=False ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM(snake_case )
model.to(snake_case )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
A__ : Optional[Any] = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , *snake_case : Optional[int] ):
'''simple docstring'''
A__ : int = BioGptModel(snake_case )
A__ : Union[str, Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
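        # BioGPT uses GPT-2-style scaled initialization: residual projection
        # ("c_proj") weights should have std = initializer_range / sqrt(2 * num_layers)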
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _UpperCamelCase ( self : Any , snake_case : Dict , snake_case : Tuple , snake_case : int , snake_case : Union[str, Any] , snake_case : Dict , *snake_case : Any ):
'''simple docstring'''
A__ : Union[str, Any] = self.num_labels
A__ : int = BioGptForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : List[str] = self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ , A__ , A__ , A__ : str = config_and_inputs
A__ : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
snake_case_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
snake_case_ = (BioGptForCausalLM,) if is_torch_available() else ()
snake_case_ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : List[str] = BioGptModelTester(self )
A__ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ : str = type
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*snake_case )
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*snake_case , gradient_checkpointing=snake_case )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*snake_case )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*snake_case )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*snake_case )
@slow
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
A__ : Optional[int] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = """left"""
        # define PAD token = EOS token
A__ : Optional[int] = tokenizer.eos_token
A__ : Dict = model.config.eos_token_id
# use different length sentences to test batching
A__ : Union[str, Any] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
A__ : List[str] = tokenizer(snake_case , return_tensors="""pt""" , padding=snake_case )
A__ : str = inputs["""input_ids"""].to(snake_case )
A__ : Dict = model.generate(
input_ids=snake_case , attention_mask=inputs["""attention_mask"""].to(snake_case ) , )
A__ : Optional[int] = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Any = model.generate(input_ids=snake_case )
A__ : List[str] = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
A__ : str = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Dict = model.generate(input_ids=snake_case , max_length=model.config.max_length - num_paddings )
A__ : Optional[Any] = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
A__ : List[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case )
A__ : str = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case )
A__ : Optional[int] = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(snake_case , [non_padded_sentence, padded_sentence] )
@slow
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Optional[Any] = BioGptModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Optional[int] = 3
A__ : List[Any] = input_dict["""input_ids"""]
A__ : Dict = input_ids.ne(1 ).to(snake_case )
A__ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A__ : Union[str, Any] = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : int = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ , A__ : str = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Any = 3
A__ : List[Any] = """multi_label_classification"""
A__ : Dict = input_dict["""input_ids"""]
A__ : Tuple = input_ids.ne(1 ).to(snake_case )
A__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A__ : Tuple = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : List[str] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Optional[Any] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
A__ : str = torch.tensor([[2, 4805, 9, 656, 21]] )
A__ : Dict = model(snake_case )[0]
A__ : Tuple = 4_2384
A__ : str = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , snake_case )
A__ : str = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : Tuple = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
torch.manual_seed(0 )
A__ : Tuple = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(snake_case )
A__ : Optional[int] = model.generate(
**snake_case , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=snake_case , )
A__ : Optional[int] = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case )
A__ : List[str] = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(snake_case , snake_case )
| 296
| 1
|
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( UpperCAmelCase__ : str, UpperCAmelCase__ : list[str] | None = None ) ->list[list[str]]:
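    # Tabulation DP: table[i] collects every list of words from `word_bank`
    # that concatenates to target[:i]; words may be reused any number of times.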
A__ : int = word_bank or []
# create a table
A__ : int = len(UpperCAmelCase__ ) + 1
A__ : list[list[list[str]]] = []
for _ in range(UpperCAmelCase__ ):
table.append([] )
# seed value
    A__ : Union[str, Any] = [[]]  # the empty string has exactly one construction: the empty combination
# iterate through the indices
for i in range(UpperCAmelCase__ ):
        # only positions that are already constructible can be extended
if table[i] != []:
for word in word_bank:
            # the word must match the target slice starting at i
if target[i : i + len(UpperCAmelCase__ )] == word:
A__ : list[list[str]] = [
[word, *way] for way in table[i]
]
                # prepend the word to every combination the current position holds,
                # then push each extended combination to table[i + len(word)]
table[i + len(UpperCAmelCase__ )] += new_combinations
    # combinations were built in reverse order, so reverse each one for readable output
for combination in table[len(UpperCAmelCase__ )]:
combination.reverse()
return table[len(UpperCAmelCase__ )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 296
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''spiece.model'''}
A_ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
A_ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
A_ = 0
A_ = 1
A_ = 2
A_ = 3
A_ = 4
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = 'left'
def __init__( self : Dict , snake_case : int , snake_case : List[Any]=False , snake_case : List[str]=True , snake_case : Dict=False , snake_case : Optional[Any]="<s>" , snake_case : List[str]="</s>" , snake_case : Tuple="<unk>" , snake_case : Tuple="<sep>" , snake_case : Union[str, Any]="<pad>" , snake_case : Dict="<cls>" , snake_case : Optional[Any]="<mask>" , snake_case : Optional[int]=["<eop>", "<eod>"] , snake_case : Optional[Dict[str, Any]] = None , **snake_case : Dict , ):
'''simple docstring'''
A__ : Optional[int] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
A__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case , remove_space=snake_case , keep_accents=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , additional_special_tokens=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
A__ : str = 3
A__ : str = do_lower_case
A__ : Optional[Any] = remove_space
A__ : List[Any] = keep_accents
A__ : Union[str, Any] = vocab_file
A__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case )
@property
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : int = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ):
'''simple docstring'''
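        # the SentencePiece processor is not picklable; drop it here and reload it in __setstate__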
A__ : int = self.__dict__.copy()
A__ : int = None
return state
def __setstate__( self : Tuple , snake_case : Union[str, Any] ):
'''simple docstring'''
A__ : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A__ : Optional[int] = {}
A__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self : List[str] , snake_case : Optional[Any] ):
'''simple docstring'''
if self.remove_space:
A__ : Optional[Any] = """ """.join(inputs.strip().split() )
else:
A__ : Dict = inputs
A__ : str = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
A__ : Any = unicodedata.normalize("""NFKD""" , snake_case )
A__ : Optional[int] = """""".join([c for c in outputs if not unicodedata.combining(snake_case )] )
if self.do_lower_case:
A__ : Any = outputs.lower()
return outputs
def _UpperCamelCase ( self : Union[str, Any] , snake_case : str ):
'''simple docstring'''
A__ : Dict = self.preprocess_text(snake_case )
A__ : Dict = self.sp_model.encode(snake_case , out_type=snake_case )
A__ : Optional[int] = []
for piece in pieces:
if len(snake_case ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
A__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A__ : int = cur_pieces[1:]
else:
A__ : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(snake_case )
else:
new_pieces.append(snake_case )
return new_pieces
def _UpperCamelCase ( self : List[str] , snake_case : Tuple ):
'''simple docstring'''
return self.sp_model.PieceToId(snake_case )
def _UpperCamelCase ( self : List[str] , snake_case : Any ):
'''simple docstring'''
return self.sp_model.IdToPiece(snake_case )
def _UpperCamelCase ( self : Optional[int] , snake_case : Any ):
'''simple docstring'''
A__ : Union[str, Any] = """""".join(snake_case ).replace(snake_case , """ """ ).strip()
return out_string
def _UpperCamelCase ( self : int , snake_case : List[int] , snake_case : bool = False , snake_case : bool = None , snake_case : bool = True , **snake_case : Union[str, Any] , ):
'''simple docstring'''
A__ : List[str] = kwargs.pop("""use_source_tokenizer""" , snake_case )
A__ : Any = self.convert_ids_to_tokens(snake_case , skip_special_tokens=snake_case )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A__ : Any = []
A__ : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case ) )
A__ : str = []
sub_texts.append(snake_case )
else:
current_sub_text.append(snake_case )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
A__ : Dict = """""".join(snake_case )
A__ : int = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A__ : Tuple = self.clean_up_tokenization(snake_case )
return clean_text
else:
return text
def _UpperCamelCase ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
'''simple docstring'''
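        # XLNet appends its special tokens at the end: ``A <sep> <cls>`` for a single
        # sequence and ``A <sep> B <sep> <cls>`` for a pair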
A__ : Tuple = [self.sep_token_id]
A__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _UpperCamelCase ( self : Dict , snake_case : List[int] , snake_case : Optional[List[int]] = None , snake_case : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is not None:
return ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1, 1]
return ([0] * len(snake_case )) + [1, 1]
def _UpperCamelCase ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
'''simple docstring'''
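        # sequence tokens get segment id 0 (1 for the second sequence); the trailing
        # <cls> token gets its own segment id, 2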
A__ : Any = [self.sep_token_id]
A__ : int = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _UpperCamelCase ( self : Optional[Any] , snake_case : str , snake_case : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A__ : List[Any] = os.path.join(
snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , """wb""" ) as fi:
A__ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
| 296
| 1
|
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
def __init__( self : Tuple ):
'''simple docstring'''
A__ : Optional[int] = {}
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
print(self.vertex )
for i in self.vertex:
print(snake_case , """ -> """ , """ -> """.join([str(snake_case ) for j in self.vertex[i]] ) )
def _UpperCamelCase ( self : List[Any] , snake_case : int , snake_case : int ):
'''simple docstring'''
if from_vertex in self.vertex:
self.vertex[from_vertex].append(snake_case )
else:
# else make a new vertex
A__ : Tuple = [to_vertex]
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
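        # start a DFS from every unvisited vertex so disconnected components are covered too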
A__ : str = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(snake_case , snake_case )
def _UpperCamelCase ( self : List[Any] , snake_case : int , snake_case : list ):
'''simple docstring'''
A__ : Optional[int] = True
print(snake_case , end=""" """ )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(snake_case , snake_case )
if __name__ == "__main__":
A_ = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 296
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any] ) ->List[str]:
A__ : Union[str, Any] = DPTConfig()
if "large" in checkpoint_url:
A__ : int = 1_0_2_4
A__ : Union[str, Any] = 4_0_9_6
A__ : Optional[int] = 2_4
A__ : int = 1_6
A__ : Union[str, Any] = [5, 1_1, 1_7, 2_3]
A__ : Tuple = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4]
A__ : Tuple = (1, 3_8_4, 3_8_4)
if "ade" in checkpoint_url:
A__ : Optional[int] = True
A__ : int = 1_5_0
A__ : Union[str, Any] = """huggingface/label-files"""
A__ : List[Any] = """ade20k-id2label.json"""
A__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(UpperCAmelCase__, UpperCAmelCase__, repo_type="""dataset""" ) ), """r""" ) )
A__ : List[Any] = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
A__ : Dict = idalabel
A__ : List[Any] = {v: k for k, v in idalabel.items()}
A__ : Optional[Any] = [1, 1_5_0, 4_8_0, 4_8_0]
return config, expected_shape
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->Any:
A__ : List[Any] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__, UpperCAmelCase__ )
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any] ) ->List[str]:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
A__ : str = name.replace("""pretrained.model""", """dpt.encoder""" )
if "pretrained.model" in name:
A__ : Dict = name.replace("""pretrained.model""", """dpt.embeddings""" )
if "patch_embed" in name:
A__ : List[Any] = name.replace("""patch_embed""", """patch_embeddings""" )
if "pos_embed" in name:
A__ : int = name.replace("""pos_embed""", """position_embeddings""" )
if "attn.proj" in name:
A__ : Tuple = name.replace("""attn.proj""", """attention.output.dense""" )
if "proj" in name and "project" not in name:
A__ : List[Any] = name.replace("""proj""", """projection""" )
if "blocks" in name:
A__ : Optional[Any] = name.replace("""blocks""", """layer""" )
if "mlp.fc1" in name:
A__ : int = name.replace("""mlp.fc1""", """intermediate.dense""" )
if "mlp.fc2" in name:
A__ : List[str] = name.replace("""mlp.fc2""", """output.dense""" )
if "norm1" in name:
A__ : Any = name.replace("""norm1""", """layernorm_before""" )
if "norm2" in name:
A__ : List[str] = name.replace("""norm2""", """layernorm_after""" )
if "scratch.output_conv" in name:
A__ : Optional[int] = name.replace("""scratch.output_conv""", """head""" )
if "scratch" in name:
A__ : List[str] = name.replace("""scratch""", """neck""" )
if "layer1_rn" in name:
A__ : List[str] = name.replace("""layer1_rn""", """convs.0""" )
if "layer2_rn" in name:
A__ : Optional[int] = name.replace("""layer2_rn""", """convs.1""" )
if "layer3_rn" in name:
A__ : Any = name.replace("""layer3_rn""", """convs.2""" )
if "layer4_rn" in name:
A__ : Any = name.replace("""layer4_rn""", """convs.3""" )
if "refinenet" in name:
A__ : Union[str, Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
A__ : str = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
A__ : Optional[Any] = name.replace("""out_conv""", """projection""" )
if "resConfUnit1" in name:
A__ : List[Any] = name.replace("""resConfUnit1""", """residual_layer1""" )
if "resConfUnit2" in name:
A__ : Tuple = name.replace("""resConfUnit2""", """residual_layer2""" )
if "conv1" in name:
A__ : Tuple = name.replace("""conv1""", """convolution1""" )
if "conv2" in name:
A__ : List[Any] = name.replace("""conv2""", """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
A__ : Union[str, Any] = name.replace("""pretrained.act_postprocess1.0.project.0""", """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
A__ : Tuple = name.replace("""pretrained.act_postprocess2.0.project.0""", """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
A__ : Optional[Any] = name.replace("""pretrained.act_postprocess3.0.project.0""", """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
A__ : Optional[Any] = name.replace("""pretrained.act_postprocess4.0.project.0""", """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
A__ : Any = name.replace("""pretrained.act_postprocess1.3""", """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
A__ : List[Any] = name.replace("""pretrained.act_postprocess1.4""", """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
A__ : Dict = name.replace("""pretrained.act_postprocess2.3""", """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
A__ : Optional[Any] = name.replace("""pretrained.act_postprocess2.4""", """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
A__ : Union[str, Any] = name.replace("""pretrained.act_postprocess3.3""", """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
A__ : Optional[int] = name.replace("""pretrained.act_postprocess4.3""", """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
A__ : Dict = name.replace("""pretrained.act_postprocess4.4""", """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
A__ : Union[str, Any] = name.replace("""pretrained""", """dpt""" )
if "bn" in name:
A__ : Union[str, Any] = name.replace("""bn""", """batch_norm""" )
if "head" in name:
A__ : Dict = name.replace("""head""", """head.head""" )
if "encoder.norm" in name:
A__ : Optional[int] = name.replace("""encoder.norm""", """layernorm""" )
if "auxlayer" in name:
A__ : List[str] = name.replace("""auxlayer""", """auxiliary_head.head""" )
return name
def _lowerCAmelCase ( UpperCAmelCase__ : int, UpperCAmelCase__ : Dict ) ->str:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
A__ : Tuple = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A__ : List[str] = in_proj_weight[: config.hidden_size, :]
A__ : int = in_proj_bias[: config.hidden_size]
A__ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : str = in_proj_weight[
-config.hidden_size :, :
]
A__ : Optional[Any] = in_proj_bias[-config.hidden_size :]
def _lowerCAmelCase ( ) ->List[str]:
A__ : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : int = Image.open(requests.get(UpperCAmelCase__, stream=UpperCAmelCase__ ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( UpperCAmelCase__ : int, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : str, UpperCAmelCase__ : int ) ->str:
A__ , A__ : Dict = get_dpt_config(UpperCAmelCase__ )
# load original state_dict from URL
A__ : Any = torch.hub.load_state_dict_from_url(UpperCAmelCase__, map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(UpperCAmelCase__ )
# rename keys
for key in state_dict.copy().keys():
A__ : int = state_dict.pop(UpperCAmelCase__ )
A__ : str = val
# read in qkv matrices
read_in_q_k_v(UpperCAmelCase__, UpperCAmelCase__ )
# load HuggingFace model
A__ : Optional[Any] = DPTForSemanticSegmentation(UpperCAmelCase__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(UpperCAmelCase__ )
model.load_state_dict(UpperCAmelCase__ )
model.eval()
# Check outputs on an image
A__ : Optional[Any] = 4_8_0 if """ade""" in checkpoint_url else 3_8_4
A__ : Dict = DPTImageProcessor(size=UpperCAmelCase__ )
A__ : Optional[int] = prepare_img()
A__ : Any = image_processor(UpperCAmelCase__, return_tensors="""pt""" )
# forward pass
A__ : List[str] = model(**UpperCAmelCase__ ).logits if """ade""" in checkpoint_url else model(**UpperCAmelCase__ ).predicted_depth
# Assert logits
A__ : Optional[Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
if "ade" in checkpoint_url:
A__ : Optional[int] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
assert outputs.shape == torch.Size(UpperCAmelCase__ )
assert (
torch.allclose(outputs[0, 0, :3, :3], UpperCAmelCase__, atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3], UpperCAmelCase__ )
)
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCAmelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCAmelCase__ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase__, UpperCAmelCase__ ), organization="""nielsr""", commit_message="""Add model""", use_temp_dir=UpperCAmelCase__, )
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase__, UpperCAmelCase__ ), organization="""nielsr""", commit_message="""Add image processor""", use_temp_dir=UpperCAmelCase__, )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
A_ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 296
| 1
|
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
A_ = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
A_ = '''pt''' if is_torch_available() else '''tf'''
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase ):
snake_case_ = CamembertTokenizer
snake_case_ = CamembertTokenizerFast
snake_case_ = True
snake_case_ = True
def _UpperCamelCase ( self : int ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ : List[str] = CamembertTokenizer(snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : str = """<pad>"""
A__ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(snake_case ) , 1004 )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Optional[int] = CamembertTokenizer(snake_case )
tokenizer.save_pretrained(self.tmpdirname )
A__ : Optional[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
A__ : Tuple = """I was born in 92000, and this is falsé."""
A__ : Optional[Any] = tokenizer.encode(snake_case )
A__ : List[Any] = rust_tokenizer.encode(snake_case )
self.assertListEqual(snake_case , snake_case )
A__ : Tuple = tokenizer.encode(snake_case , add_special_tokens=snake_case )
A__ : List[str] = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
        # <unk> tokens are not the same for `rust` as for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
A__ : List[Any] = tokenizer.convert_ids_to_tokens(snake_case )
A__ : Tuple = rust_tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
A__ : Any = self.get_tokenizer()
A__ : Tuple = self.get_rust_tokenizer()
A__ : Dict = """I was born in 92000, and this is falsé."""
A__ : Optional[int] = tokenizer.tokenize(snake_case )
A__ : int = rust_tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
A__ : List[Any] = tokenizer.encode(snake_case , add_special_tokens=snake_case )
A__ : Tuple = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
A__ : int = self.get_rust_tokenizer()
A__ : Tuple = tokenizer.encode(snake_case )
A__ : Union[str, Any] = rust_tokenizer.encode(snake_case )
self.assertListEqual(snake_case , snake_case )
@slow
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : Union[str, Any] = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # CamemBERT is a French model, so we also use French texts.
A__ : Tuple = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=snake_case , )
| 296
|
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
A_ = '''src/diffusers'''
A_ = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
A_ = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
A_ = spec.loader.load_module()
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Optional[Any] ) ->Any:
return line.startswith(UpperCAmelCase__ ) or len(UpperCAmelCase__ ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""", UpperCAmelCase__ ) is not None
def _lowerCAmelCase ( UpperCAmelCase__ : List[str] ) ->Union[str, Any]:
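    # Resolve a dotted `object_name` (e.g. "schedulers.scheduling_ddpm.DDPMScheduler")
    # to the file that defines it and return the full source of that class/function.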
A__ : Any = object_name.split(""".""" )
A__ : int = 0
# First let's find the module where our object lives.
A__ : str = parts[i]
while i < len(UpperCAmelCase__ ) and not os.path.isfile(os.path.join(UpperCAmelCase__, f'{module}.py' ) ):
i += 1
if i < len(UpperCAmelCase__ ):
A__ : Union[str, Any] = os.path.join(UpperCAmelCase__, parts[i] )
if i >= len(UpperCAmelCase__ ):
raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(UpperCAmelCase__, f'{module}.py' ), """r""", encoding="""utf-8""", newline="""\n""" ) as f:
A__ : List[Any] = f.readlines()
# Now let's find the class / func in the code!
A__ : Optional[Any] = """"""
A__ : Any = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCAmelCase__ ) and re.search(Rf'^{indent}(class|def)\s+{name}(\(|\:)', lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCAmelCase__ ):
raise ValueError(f' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
A__ : List[Any] = line_index
while line_index < len(UpperCAmelCase__ ) and _should_continue(lines[line_index], UpperCAmelCase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A__ : List[Any] = lines[start_index:line_index]
return "".join(UpperCAmelCase__ )
A_ = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
A_ = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
A_ = re.compile(r'''<FILL\s+[^>]*>''')
def _lowerCAmelCase ( UpperCAmelCase__ : List[str] ) ->Optional[Any]:
A__ : Dict = code.split("""\n""" )
A__ : List[Any] = 0
while idx < len(UpperCAmelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCAmelCase__ ):
return re.search(R"""^(\s*)\S""", lines[idx] ).groups()[0]
return ""
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[Any] ) ->int:
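    # Run black on a code fragment: indented fragments are wrapped in a dummy
    # `class Bla:` so black can parse them, formatted, then unwrapped again.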
A__ : str = len(get_indent(UpperCAmelCase__ ) ) > 0
if has_indent:
A__ : Union[str, Any] = f'class Bla:\n{code}'
A__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=1_1_9, preview=UpperCAmelCase__ )
A__ : Tuple = black.format_str(UpperCAmelCase__, mode=UpperCAmelCase__ )
A__ , A__ : List[Any] = style_docstrings_in_code(UpperCAmelCase__ )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def _lowerCAmelCase ( UpperCAmelCase__ : Any, UpperCAmelCase__ : Dict=False ) ->List[Any]:
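    # Scan `filename` for `# Copied from diffusers.<object>` markers, rebuild the
    # reference code (applying any `with old->new` replacement patterns), and record
    # every block that has drifted from its source, rewriting it when `overwrite=True`.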
with open(UpperCAmelCase__, """r""", encoding="""utf-8""", newline="""\n""" ) as f:
A__ : int = f.readlines()
A__ : Dict = []
A__ : List[str] = 0
    # Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCAmelCase__ ):
A__ : Dict = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
A__ , A__ , A__ : Dict = search.groups()
A__ : Tuple = find_code_in_diffusers(UpperCAmelCase__ )
A__ : int = get_indent(UpperCAmelCase__ )
A__ : List[str] = line_index + 1 if indent == theoretical_indent else line_index + 2
A__ : Tuple = theoretical_indent
A__ : Optional[Any] = start_index
        # Loop over the observed code, stopping when the indentation diminishes or when we hit an `# End copy` comment.
A__ : Tuple = True
while line_index < len(UpperCAmelCase__ ) and should_continue:
line_index += 1
if line_index >= len(UpperCAmelCase__ ):
break
A__ : Optional[int] = lines[line_index]
A__ : Tuple = _should_continue(UpperCAmelCase__, UpperCAmelCase__ ) and re.search(f'^{indent}# End copy', UpperCAmelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A__ : Dict = lines[start_index:line_index]
A__ : Tuple = """""".join(UpperCAmelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
A__ : Optional[int] = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(UpperCAmelCase__ ) is None]
A__ : Optional[Any] = """\n""".join(UpperCAmelCase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(UpperCAmelCase__ ) > 0:
A__ : int = replace_pattern.replace("""with""", """""" ).split(""",""" )
A__ : List[Any] = [_re_replace_pattern.search(UpperCAmelCase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
A__ , A__ , A__ : Union[str, Any] = pattern.groups()
A__ : Union[str, Any] = re.sub(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
if option.strip() == "all-casing":
A__ : List[Any] = re.sub(obja.lower(), obja.lower(), UpperCAmelCase__ )
A__ : Tuple = re.sub(obja.upper(), obja.upper(), UpperCAmelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
A__ : Optional[int] = blackify(lines[start_index - 1] + theoretical_code )
A__ : List[Any] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
A__ : List[Any] = lines[:start_index] + [theoretical_code] + lines[line_index:]
A__ : Tuple = start_index + 1
if overwrite and len(UpperCAmelCase__ ) > 0:
# Warn the user a file has been modified.
print(f'Detected changes, rewriting {filename}.' )
with open(UpperCAmelCase__, """w""", encoding="""utf-8""", newline="""\n""" ) as f:
f.writelines(UpperCAmelCase__ )
return diffs
def _lowerCAmelCase ( UpperCAmelCase__ : bool = False ) ->Any:
A__ : Dict = glob.glob(os.path.join(UpperCAmelCase__, """**/*.py""" ), recursive=UpperCAmelCase__ )
A__ : str = []
for filename in all_files:
A__ : Any = is_copy_consistent(UpperCAmelCase__, UpperCAmelCase__ )
diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(UpperCAmelCase__ ) > 0:
A__ : Any = """\n""".join(UpperCAmelCase__ )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
A_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 296
| 1
|
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( UpperCAmelCase__ : list[list[int]] ) ->bool:
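    # Depth-first backtracking from (0, 0) to (size - 1, size - 1); cells on the
    # found path are marked with 1 in the `solutions` matrix.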
A__ : Optional[int] = len(UpperCAmelCase__ )
    # Create a solution matrix to record the path.
A__ : List[Any] = [[0 for _ in range(UpperCAmelCase__ )] for _ in range(UpperCAmelCase__ )]
A__ : List[Any] = run_maze(UpperCAmelCase__, 0, 0, UpperCAmelCase__ )
if solved:
print("""\n""".join(str(UpperCAmelCase__ ) for row in solutions ) )
else:
print("""No solution exists!""" )
return solved
def _lowerCAmelCase ( UpperCAmelCase__ : list[list[int]], UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : list[list[int]] ) ->bool:
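    # One backtracking step: bounds-check (i, j), mark it as part of the path,
    # recurse into the four neighbours, and unmark it if no direction succeeds.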
A__ : List[Any] = len(UpperCAmelCase__ )
# Final check point.
if i == j == (size - 1):
A__ : Dict = 1
return True
A__ : List[str] = (not i < 0) and (not j < 0) # Check lower bounds
A__ : List[Any] = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
        # check that the cell is neither already visited nor blocked
A__ : List[str] = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
            # mark this cell as part of the path
A__ : Any = 1
            # try all four directions
if (
run_maze(UpperCAmelCase__, i + 1, UpperCAmelCase__, UpperCAmelCase__ )
or run_maze(UpperCAmelCase__, UpperCAmelCase__, j + 1, UpperCAmelCase__ )
or run_maze(UpperCAmelCase__, i - 1, UpperCAmelCase__, UpperCAmelCase__ )
or run_maze(UpperCAmelCase__, UpperCAmelCase__, j - 1, UpperCAmelCase__ )
):
return True
A__ : Union[str, Any] = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 296
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {
'''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''LlamaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''LlamaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''LlamaForCausalLM''',
'''LlamaModel''',
'''LlamaPreTrainedModel''',
'''LlamaForSequenceClassification''',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 296
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = 'wavlm'
def __init__( self : int , snake_case : Dict=32 , snake_case : Optional[Any]=768 , snake_case : Optional[int]=12 , snake_case : str=12 , snake_case : str=3072 , snake_case : Union[str, Any]="gelu" , snake_case : List[str]=0.1 , snake_case : Tuple=0.1 , snake_case : Dict=0.1 , snake_case : List[Any]=0.0 , snake_case : Optional[Any]=0.1 , snake_case : Optional[int]=0.1 , snake_case : int=0.02 , snake_case : Dict=1e-5 , snake_case : Optional[Any]="group" , snake_case : Any="gelu" , snake_case : List[str]=(512, 512, 512, 512, 512, 512, 512) , snake_case : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , snake_case : List[str]=(10, 3, 3, 3, 3, 2, 2) , snake_case : List[str]=False , snake_case : Optional[Any]=128 , snake_case : int=16 , snake_case : List[str]=320 , snake_case : Dict=800 , snake_case : Optional[Any]=False , snake_case : Union[str, Any]=True , snake_case : str=0.05 , snake_case : Tuple=10 , snake_case : List[Any]=2 , snake_case : Any=0.0 , snake_case : int=10 , snake_case : str=320 , snake_case : Optional[Any]=2 , snake_case : List[Any]=0.1 , snake_case : Union[str, Any]=100 , snake_case : Optional[int]=256 , snake_case : List[str]=256 , snake_case : Optional[int]=0.1 , snake_case : Tuple="mean" , snake_case : Any=False , snake_case : Optional[int]=False , snake_case : List[str]=256 , snake_case : int=(512, 512, 512, 512, 1500) , snake_case : str=(5, 3, 3, 1, 1) , snake_case : Optional[Any]=(1, 2, 3, 1, 1) , snake_case : int=512 , snake_case : Any=80 , snake_case : Any=0 , snake_case : Dict=1 , snake_case : Dict=2 , snake_case : Optional[Any]=False , snake_case : Any=3 , snake_case : str=2 , snake_case : Optional[Any]=3 , snake_case : str=None , **snake_case : Tuple , ):
'''simple docstring'''
super().__init__(**snake_case , pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case )
A__ : Any = hidden_size
A__ : List[Any] = feat_extract_norm
A__ : Any = feat_extract_activation
A__ : Dict = list(snake_case )
A__ : int = list(snake_case )
A__ : Union[str, Any] = list(snake_case )
A__ : int = conv_bias
A__ : Optional[int] = num_buckets
A__ : List[str] = max_bucket_distance
A__ : Any = num_conv_pos_embeddings
A__ : int = num_conv_pos_embedding_groups
A__ : Union[str, Any] = len(self.conv_dim )
A__ : Tuple = num_hidden_layers
A__ : Dict = intermediate_size
A__ : Union[str, Any] = hidden_act
A__ : Union[str, Any] = num_attention_heads
A__ : List[Any] = hidden_dropout
A__ : List[Any] = attention_dropout
A__ : Optional[int] = activation_dropout
A__ : Any = feat_proj_dropout
A__ : str = final_dropout
A__ : str = layerdrop
A__ : Optional[int] = layer_norm_eps
A__ : List[str] = initializer_range
A__ : str = num_ctc_classes
A__ : List[str] = vocab_size
A__ : List[str] = do_stable_layer_norm
A__ : Optional[Any] = use_weighted_layer_sum
A__ : Dict = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A__ : Union[str, Any] = apply_spec_augment
A__ : List[Any] = mask_time_prob
A__ : str = mask_time_length
A__ : str = mask_time_min_masks
A__ : List[str] = mask_feature_prob
A__ : Union[str, Any] = mask_feature_length
# parameters for pretraining with codevector quantized representations
A__ : int = num_codevectors_per_group
A__ : Any = num_codevector_groups
A__ : Tuple = contrastive_logits_temperature
A__ : str = num_negatives
A__ : Optional[Any] = codevector_dim
A__ : List[Any] = proj_codevector_dim
A__ : Tuple = diversity_loss_weight
# ctc loss
A__ : int = ctc_loss_reduction
A__ : Union[str, Any] = ctc_zero_infinity
# adapter
A__ : List[str] = add_adapter
A__ : Dict = adapter_kernel_size
A__ : Optional[int] = adapter_stride
A__ : Optional[int] = num_adapter_layers
A__ : str = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A__ : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A__ : str = list(snake_case )
A__ : Union[str, Any] = list(snake_case )
A__ : Tuple = list(snake_case )
A__ : Tuple = xvector_output_dim
@property
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 296
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
A_ = object()
# For specifying empty leaf dict `{}`
A_ = object()
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Any] ) ->Dict:
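    # True when the tuple of regex patterns `qs` matches a contiguous window of
    # the flattened parameter-key tuple `ks`.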
A__ : Union[str, Any] = tuple((re.compile(x + """$""" ) for x in qs) )
for i in range(len(UpperCAmelCase__ ) - len(UpperCAmelCase__ ) + 1 ):
A__ : Optional[Any] = [x.match(UpperCAmelCase__ ) for x, y in zip(UpperCAmelCase__, ks[i:] )]
if matches and all(UpperCAmelCase__ ):
return True
return False
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any] ) ->Dict:
def replace(UpperCAmelCase__ : int, UpperCAmelCase__ : List[str] ):
for rule, replacement in rules:
if _match(UpperCAmelCase__, UpperCAmelCase__ ):
return replacement
return val
return replace
def _lowerCAmelCase ( ) ->Tuple:
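    # Sharding rules for a GPT-2-style model: parameter-name patterns mapped to
    # PartitionSpecs. "mp" names the model-parallel mesh axis; None means replicate.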
return [
# embeddings
(("transformer", "wpe", "embedding"), P("""mp""", UpperCAmelCase__ )),
(("transformer", "wte", "embedding"), P("""mp""", UpperCAmelCase__ )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCAmelCase__, """mp""" )),
(("attention", "out_proj", "kernel"), P("""mp""", UpperCAmelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCAmelCase__, """mp""" )),
(("mlp", "c_fc", "bias"), P("""mp""" )),
(("mlp", "c_proj", "kernel"), P("""mp""", UpperCAmelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple ) ->Any:
A__ : Union[str, Any] = _get_partition_rules()
A__ : int = _replacement_rules(UpperCAmelCase__ )
A__ : Tuple = {k: _unmatched for k in flatten_dict(UpperCAmelCase__ )}
A__ : Optional[int] = {k: replace(UpperCAmelCase__, UpperCAmelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCAmelCase__ ) )
| 296
| 1
|
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] , snake_case : Optional[int] , snake_case : List[Any]=13 , snake_case : List[str]=7 , snake_case : Optional[Any]=True , snake_case : Optional[Any]=True , snake_case : Tuple=False , snake_case : Optional[int]=True , snake_case : Union[str, Any]=99 , snake_case : Any=64 , snake_case : Dict=5 , snake_case : Tuple=4 , snake_case : str=64 , snake_case : str="gelu" , snake_case : Any=0.1 , snake_case : Tuple=0.1 , snake_case : Dict=512 , snake_case : int=16 , snake_case : List[str]=2 , snake_case : Any=0.02 , snake_case : Optional[Any]=3 , snake_case : Optional[Any]=4 , snake_case : Dict=None , ):
'''simple docstring'''
A__ : int = parent
A__ : int = batch_size
A__ : Union[str, Any] = seq_length
A__ : List[str] = is_training
A__ : str = use_input_mask
A__ : List[Any] = use_token_type_ids
A__ : int = use_labels
A__ : Any = vocab_size
A__ : str = hidden_size
A__ : Any = num_hidden_layers
A__ : str = num_attention_heads
A__ : Any = intermediate_size
A__ : List[Any] = hidden_act
A__ : int = hidden_dropout_prob
A__ : Union[str, Any] = attention_probs_dropout_prob
A__ : Union[str, Any] = max_position_embeddings
A__ : int = type_vocab_size
A__ : List[Any] = type_sequence_label_size
A__ : Any = initializer_range
A__ : Optional[Any] = num_labels
A__ : List[Any] = num_choices
A__ : Optional[int] = scope
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : List[Any] = None
if self.use_input_mask:
A__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Dict = None
A__ : Tuple = None
A__ : Tuple = None
if self.use_labels:
A__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Any = ids_tensor([self.batch_size] , self.num_choices )
A__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Any , snake_case : str , snake_case : Tuple , snake_case : Any , snake_case : Optional[Any] , snake_case : Tuple , snake_case : Dict ):
'''simple docstring'''
A__ : Optional[Any] = MPNetModel(config=snake_case )
model.to(snake_case )
model.eval()
A__ : int = model(snake_case , snake_case )
A__ : Union[str, Any] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self : Optional[int] , snake_case : Any , snake_case : Any , snake_case : Dict , snake_case : str , snake_case : Tuple , snake_case : Any ):
'''simple docstring'''
A__ : Optional[int] = MPNetForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Dict = model(
snake_case , attention_mask=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : Dict , snake_case : List[Any] , snake_case : str , snake_case : str , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : int ):
'''simple docstring'''
A__ : Tuple = self.num_labels
A__ : Tuple = MPNetForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : Any = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : str , snake_case : int , snake_case : List[Any] , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : str , snake_case : int ):
'''simple docstring'''
A__ : List[Any] = self.num_choices
A__ : str = MPNetForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Dict = model(
snake_case , attention_mask=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCamelCase ( self : int , snake_case : Dict , snake_case : Tuple , snake_case : List[Any] , snake_case : List[Any] , snake_case : List[Any] , snake_case : List[Any] ):
'''simple docstring'''
A__ : List[str] = self.num_labels
A__ : Tuple = MPNetForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Dict = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : Union[str, Any] = self.prepare_config_and_inputs()
((A__) , (A__) , (A__) , (A__) , (A__) , (A__)) : int = config_and_inputs
A__ : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
snake_case_ = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = True
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : List[Any] = MPNetModelTester(self )
A__ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*snake_case )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*snake_case )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*snake_case )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : List[str] = MPNetModel.from_pretrained("""microsoft/mpnet-base""" )
A__ : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
A__ : Dict = model(snake_case )[0]
A__ : Union[str, Any] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , snake_case )
A__ : str = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
| 296
|
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , snake_case : Tuple , snake_case : List[str]=2 , snake_case : List[str]=8 , snake_case : List[Any]=True , snake_case : Optional[Any]=True , snake_case : List[Any]=True , snake_case : Dict=True , snake_case : Tuple=99 , snake_case : Dict=16 , snake_case : Dict=5 , snake_case : int=2 , snake_case : Any=36 , snake_case : str="gelu" , snake_case : Dict=0.0 , snake_case : List[Any]=0.0 , snake_case : int=512 , snake_case : List[Any]=16 , snake_case : Tuple=2 , snake_case : Any=0.02 , snake_case : Optional[Any]=3 , snake_case : List[Any]=4 , snake_case : str=None , ):
'''simple docstring'''
A__ : Union[str, Any] = parent
A__ : Optional[Any] = batch_size
A__ : Dict = seq_length
A__ : str = is_training
A__ : Tuple = use_input_mask
A__ : Dict = use_token_type_ids
A__ : Dict = use_labels
A__ : int = vocab_size
A__ : List[str] = hidden_size
A__ : Union[str, Any] = num_hidden_layers
A__ : int = num_attention_heads
A__ : List[str] = intermediate_size
A__ : int = hidden_act
A__ : str = hidden_dropout_prob
A__ : Tuple = attention_probs_dropout_prob
A__ : Any = max_position_embeddings
A__ : Optional[int] = type_vocab_size
A__ : int = type_sequence_label_size
A__ : Optional[Any] = initializer_range
A__ : int = num_labels
A__ : Optional[int] = num_choices
A__ : Optional[int] = scope
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Any = None
if self.use_input_mask:
A__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Optional[int] = None
if self.use_token_type_ids:
A__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : Dict = None
A__ : List[str] = None
A__ : Union[str, Any] = None
if self.use_labels:
A__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Any = ids_tensor([self.batch_size] , self.num_choices )
A__ : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Any = self.get_config()
A__ : List[str] = 300
return config
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
        ((A__) , (A__) , (A__) , (A__) , (A__) , (A__) , (A__)) : Tuple = self.prepare_config_and_inputs()
A__ : List[str] = True
A__ : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A__ : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _UpperCamelCase ( self : Any , snake_case : Any , snake_case : Tuple , snake_case : Any , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Dict ):
'''simple docstring'''
A__ : List[str] = MraModel(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Dict = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
A__ : List[str] = model(snake_case , token_type_ids=snake_case )
A__ : Union[str, Any] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Optional[Any] , snake_case : List[Any] , snake_case : Any , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Dict , snake_case : str , snake_case : Dict , snake_case : str , ):
'''simple docstring'''
A__ : Dict = True
A__ : Optional[Any] = MraModel(snake_case )
model.to(snake_case )
model.eval()
A__ : Union[str, Any] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
A__ : str = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , encoder_hidden_states=snake_case , )
A__ : Optional[int] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : int , snake_case : str , snake_case : Union[str, Any] , snake_case : Dict , snake_case : List[str] ):
'''simple docstring'''
A__ : Union[str, Any] = MraForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
A__ : List[Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Dict , snake_case : Dict , snake_case : Dict , snake_case : List[str] , snake_case : List[str] , snake_case : Tuple , snake_case : Union[str, Any] ):
'''simple docstring'''
A__ : Dict = MraForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : Tuple , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Optional[int] , snake_case : List[str] , snake_case : Union[str, Any] ):
'''simple docstring'''
A__ : str = self.num_labels
A__ : Optional[Any] = MraForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : Union[str, Any] , snake_case : Dict , snake_case : str , snake_case : List[Any] , snake_case : Any , snake_case : Dict , snake_case : Tuple , snake_case : Optional[Any] ):
'''simple docstring'''
A__ : str = self.num_labels
A__ : Union[str, Any] = MraForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : Tuple , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : int , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Dict , snake_case : Optional[Any] ):
'''simple docstring'''
A__ : List[str] = self.num_choices
A__ : str = MraForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
A__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : str = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : List[str] = self.prepare_config_and_inputs()
        ((A__) , (A__) , (A__) , (A__) , (A__) , (A__) , (A__)) : Dict = config_and_inputs
A__ : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase ):
snake_case_ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = ()
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Optional[Any] = MraModelTester(self )
A__ : List[str] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ : List[str] = type
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
@slow
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : str = MraModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@unittest.skip(reason="""MRA does not output attentions""" )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
return
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : str = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
A__ : Any = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
A__ : List[Any] = model(snake_case )[0]
A__ : List[Any] = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , snake_case )
A__ : int = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : Union[str, Any] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
A__ : Tuple = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
A__ : List[Any] = model(snake_case )[0]
A__ : Dict = 5_0265
A__ : List[str] = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , snake_case )
A__ : List[Any] = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : Any = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
A__ : List[Any] = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
A__ : List[Any] = model(snake_case )[0]
A__ : Union[str, Any] = 5_0265
A__ : Optional[Any] = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , snake_case )
A__ : Optional[int] = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
| 296
| 1
|
"""simple docstring"""
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->int:
    if not isinstance(UpperCAmelCase__, int ):
        raise ValueError("""Input must be an integer""" )
    if UpperCAmelCase__ <= 0:
        raise ValueError("""Input must be positive""" )
    return sum(
        divisor for divisor in range(1, UpperCAmelCase__ // 2 + 1 ) if UpperCAmelCase__ % divisor == 0 )
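# Hedged usage examples (illustrative values) for the proper-divisor sum
# defined above: 12 -> 1 + 2 + 3 + 4 + 6 = 16, and 6 is perfect (1 + 2 + 3).
assert _lowerCAmelCase(12) == 16
assert _lowerCAmelCase(6) == 6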
if __name__ == "__main__":
import doctest
doctest.testmod()
| 296
|
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
A_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
A_ = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
A_ = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows:
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def _UpperCamelCase ( self : List[str] , snake_case : Dict , snake_case : List[Any] , snake_case : List[str]=None , snake_case : List[Any]="uniform_average" , snake_case : int=True ):
'''simple docstring'''
A__ : Optional[int] = mean_squared_error(
snake_case , snake_case , sample_weight=snake_case , multioutput=snake_case , squared=snake_case )
return {"mse": mse}
| 296
| 1
|
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
A_ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
A_ = {'''facebook/blenderbot_small-90M''': 512}
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[Any] ) ->Tuple:
    pairs = set()
    prev_char = UpperCAmelCase__[0]
    for char in UpperCAmelCase__[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
return pairs
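# Hedged usage example (illustrative values): the helper above yields the set
# of adjacent symbol bigrams of a word tuple, as consumed by the BPE loop in
# the tokenizer below.
assert _lowerCAmelCase(("l", "o", "w", "</w>")) == {("l", "o"), ("o", "w"), ("w", "</w>")}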
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ['input_ids', 'attention_mask']
def __init__( self : Any , snake_case : Any , snake_case : Optional[int] , snake_case : List[Any]="__start__" , snake_case : str="__end__" , snake_case : str="__unk__" , snake_case : List[str]="__null__" , **snake_case : List[str] , ):
'''simple docstring'''
super().__init__(unk_token=snake_case , bos_token=snake_case , eos_token=snake_case , pad_token=snake_case , **snake_case )
with open(snake_case , encoding="""utf-8""" ) as vocab_handle:
A__ : Tuple = json.load(snake_case )
A__ : List[str] = {v: k for k, v in self.encoder.items()}
with open(snake_case , encoding="""utf-8""" ) as merges_handle:
A__ : Union[str, Any] = merges_handle.read().split("""\n""" )[1:-1]
A__ : Optional[Any] = [tuple(merge.split() ) for merge in merges]
A__ : List[str] = dict(zip(snake_case , range(len(snake_case ) ) ) )
A__ : Optional[int] = {}
@property
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return len(self.encoder )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _UpperCamelCase ( self : Union[str, Any] , snake_case : str ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A__ : Dict = re.sub("""([.,!?()])""" , r""" \1""" , snake_case )
A__ : List[str] = re.sub("""(')""" , r""" \1 """ , snake_case )
A__ : Optional[Any] = re.sub(r"""\s{2,}""" , """ """ , snake_case )
if "\n" in token:
A__ : Optional[int] = token.replace("""\n""" , """ __newln__""" )
A__ : Tuple = token.split(""" """ )
A__ : List[Any] = []
for token in tokens:
if not len(snake_case ):
continue
A__ : Optional[Any] = token.lower()
A__ : List[Any] = tuple(snake_case )
A__ : int = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
A__ : Any = get_pairs(snake_case )
if not pairs:
words.append(snake_case )
continue
while True:
A__ : Optional[Any] = min(snake_case , key=lambda snake_case : self.bpe_ranks.get(snake_case , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A__ , A__ : List[Any] = bigram
A__ : Dict = []
A__ : List[str] = 0
while i < len(snake_case ):
try:
A__ : int = word.index(snake_case , snake_case )
new_word.extend(word[i:j] )
A__ : Tuple = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A__ : Any = tuple(snake_case )
A__ : Optional[int] = new_word
if len(snake_case ) == 1:
break
else:
A__ : str = get_pairs(snake_case )
A__ : Dict = """@@ """.join(snake_case )
A__ : str = word[:-4]
A__ : Dict = word
words.append(snake_case )
return " ".join(snake_case )
def _UpperCamelCase ( self : Optional[int] , snake_case : str ):
'''simple docstring'''
A__ : str = []
A__ : Union[str, Any] = re.findall(r"""\S+\n?""" , snake_case )
for token in words:
split_tokens.extend(list(self.bpe(snake_case ).split(""" """ ) ) )
return split_tokens
def _UpperCamelCase ( self : List[Any] , snake_case : str ):
'''simple docstring'''
A__ : Optional[Any] = token.lower()
return self.encoder.get(snake_case , self.encoder.get(self.unk_token ) )
def _UpperCamelCase ( self : List[str] , snake_case : int ):
'''simple docstring'''
return self.decoder.get(snake_case , self.unk_token )
def _UpperCamelCase ( self : List[Any] , snake_case : List[str] ):
'''simple docstring'''
A__ : Union[str, Any] = """ """.join(snake_case ).replace("""@@ """ , """""" ).strip()
return out_string
def _UpperCamelCase ( self : Tuple , snake_case : str , snake_case : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A__ : Union[str, Any] = os.path.join(
snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : Any = os.path.join(
snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(snake_case , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case , ensure_ascii=snake_case ) + """\n""" )
A__ : Tuple = 0
with open(snake_case , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
A__ : Tuple = token_index
writer.write(""" """.join(snake_case ) + """\n""" )
index += 1
return vocab_file, merge_file
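# Hedged illustration of the subword join convention used in the `bpe` method
# above (illustrative values): pieces are joined with "@@ " and the trailing
# 4-character "</w>" end-of-word marker is stripped.
_demo_word = "@@ ".join(("he", "llo</w>"))[:-4]
assert _demo_word == "he@@ llo"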
| 296
|
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
A_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
def __init__( self : Optional[int] , snake_case : List[str]=None , **snake_case : Any ):
'''simple docstring'''
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" , snake_case , )
super().__init__(args=snake_case , **snake_case )
| 296
| 1
|
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _lowerCAmelCase ( UpperCAmelCase__ : Sequence[float], UpperCAmelCase__ : int, UpperCAmelCase__ : int ) ->tuple[int | None, int | None, float]:
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
A__ : Optional[int] = (low + high) // 2
A__ , A__ , A__ : List[Any] = max_subarray(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
A__ , A__ , A__ : Union[str, Any] = max_subarray(UpperCAmelCase__, mid + 1, UpperCAmelCase__ )
A__ , A__ , A__ : Union[str, Any] = max_cross_sum(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def _lowerCAmelCase ( UpperCAmelCase__ : Sequence[float], UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : int ) ->tuple[int, int, float]:
A__ , A__ : Dict = float("""-inf""" ), -1
A__ , A__ : Optional[Any] = float("""-inf""" ), -1
A__ : int | float = 0
for i in range(UpperCAmelCase__, low - 1, -1 ):
summ += arr[i]
if summ > left_sum:
A__ : Optional[int] = summ
A__ : Union[str, Any] = i
A__ : Optional[Any] = 0
for i in range(mid + 1, high + 1 ):
summ += arr[i]
if summ > right_sum:
A__ : int = summ
A__ : Union[str, Any] = i
return max_left, max_right, (left_sum + right_sum)
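# Hedged brute-force cross-check (illustrative, not part of the original
# module): enumerate all (i, j) windows and confirm the classic example
# resolves to indices 3..6 with sum 6, matching the recurrence above.
def _demo_brute_max_subarray(arr):
    best = (0, 0, arr[0])
    for i in range(len(arr)):
        for j in range(i, len(arr)):
            s = sum(arr[i : j + 1])
            if s > best[2]:
                best = (i, j, s)
    return best

assert _demo_brute_max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == (3, 6, 6)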
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->float:
A__ : Union[str, Any] = [randint(1, UpperCAmelCase__ ) for _ in range(UpperCAmelCase__ )]
A__ : Any = time.time()
max_subarray(UpperCAmelCase__, 0, input_size - 1 )
A__ : List[Any] = time.time()
return end - start
def _lowerCAmelCase ( ) ->None:
A__ : List[Any] = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
A__ : Any = [time_max_subarray(UpperCAmelCase__ ) for input_size in input_sizes]
print("""No of Inputs\t\tTime Taken""" )
for input_size, runtime in zip(UpperCAmelCase__, UpperCAmelCase__ ):
print(UpperCAmelCase__, """\t\t""", UpperCAmelCase__ )
plt.plot(UpperCAmelCase__, UpperCAmelCase__ )
plt.xlabel("""Number of Inputs""" )
plt.ylabel("""Time taken in seconds""" )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 296
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
A_ = random.Random()
def _lowerCAmelCase ( UpperCAmelCase__ : Dict, UpperCAmelCase__ : Tuple=1.0, UpperCAmelCase__ : Optional[int]=None, UpperCAmelCase__ : str=None ) ->Union[str, Any]:
if rng is None:
A__ : Optional[int] = global_rng
A__ : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Union[str, Any] , snake_case : str , snake_case : List[str]=7 , snake_case : str=400 , snake_case : Optional[Any]=2000 , snake_case : Union[str, Any]=10 , snake_case : str=160 , snake_case : List[str]=8 , snake_case : List[Any]=0.0 , snake_case : Optional[Any]=4000 , snake_case : Any=False , snake_case : int=True , ):
'''simple docstring'''
A__ : Any = parent
A__ : str = batch_size
A__ : List[str] = min_seq_length
A__ : Dict = max_seq_length
A__ : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ : Dict = padding_value
A__ : Optional[Any] = sampling_rate
A__ : Any = return_attention_mask
A__ : Optional[int] = do_normalize
A__ : Tuple = feature_size
A__ : Optional[Any] = chunk_length
A__ : Union[str, Any] = hop_length
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _UpperCamelCase ( self : Union[str, Any] , snake_case : Dict=False , snake_case : Optional[Any]=False ):
'''simple docstring'''
def _flatten(snake_case : Dict ):
return list(itertools.chain(*snake_case ) )
if equal_length:
A__ : Dict = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
A__ : Optional[int] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ : List[str] = [np.asarray(snake_case ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase ):
snake_case_ = WhisperFeatureExtractor if is_speech_available() else None
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : str = WhisperFeatureExtractionTester(self )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : List[Any] = feat_extract_first.save_pretrained(snake_case )[0]
check_json_file_has_correct_format(snake_case )
A__ : Union[str, Any] = self.feature_extraction_class.from_pretrained(snake_case )
A__ : str = feat_extract_first.to_dict()
A__ : Union[str, Any] = feat_extract_second.to_dict()
A__ : List[Any] = feat_extract_first.mel_filters
A__ : Optional[Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(snake_case , snake_case ) )
self.assertEqual(snake_case , snake_case )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = os.path.join(snake_case , """feat_extract.json""" )
feat_extract_first.to_json_file(snake_case )
A__ : int = self.feature_extraction_class.from_json_file(snake_case )
A__ : Dict = feat_extract_first.to_dict()
A__ : str = feat_extract_second.to_dict()
A__ : str = feat_extract_first.mel_filters
A__ : Dict = feat_extract_second.mel_filters
self.assertTrue(np.allclose(snake_case , snake_case ) )
self.assertEqual(snake_case , snake_case )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A__ : Union[str, Any] = [np.asarray(snake_case ) for speech_input in speech_inputs]
# Test feature size
A__ : Dict = feature_extractor(snake_case , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
A__ : str = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
A__ : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
# Test batched
A__ : List[str] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
A__ : List[str] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case ):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A__ : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A__ : str = np.asarray(snake_case )
A__ : List[str] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
A__ : Optional[int] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case ):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
# Test truncation required
A__ : Optional[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
A__ : Union[str, Any] = [np.asarray(snake_case ) for speech_input in speech_inputs]
A__ : Union[str, Any] = [x[: feature_extractor.n_samples] for x in speech_inputs]
A__ : str = [np.asarray(snake_case ) for speech_input in speech_inputs_truncated]
A__ : Optional[int] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
A__ : str = feature_extractor(snake_case , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case ):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
import torch
A__ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ : List[str] = np.random.rand(100 , 32 ).astype(np.floataa )
A__ : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A__ : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
A__ : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Optional[int] ):
'''simple docstring'''
A__ : int = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
A__ : Union[str, Any] = ds.sort("""id""" ).select(range(snake_case ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
        # fmt: off
        A__ : str = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
A__ : Optional[Any] = self._load_datasamples(1 )
A__ : Union[str, Any] = WhisperFeatureExtractor()
A__ : List[str] = feature_extractor(snake_case , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , snake_case , atol=1e-4 ) )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ : Union[str, Any] = self._load_datasamples(1 )[0]
A__ : Any = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
A__ : str = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=snake_case )[0]
self.assertTrue(np.all(np.mean(snake_case ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(snake_case ) - 1 ) < 1e-3 ) )
| 296
| 1
|
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def _lowerCAmelCase ( UpperCAmelCase__ : str ) ->Optional[Any]:
    def decorator(func : List[str] ):
        handle = getattr(func, """handle_key""", [] )
        handle += [UpperCAmelCase__]
        setattr(func, """handle_key""", handle )
        return func
return decorator
def _lowerCAmelCase ( *UpperCAmelCase__ : List[str] ) ->List[Any]:
    def decorator(func : List[Any] ):
        handle = getattr(func, """handle_key""", [] )
        handle += list(UpperCAmelCase__ )
        setattr(func, """handle_key""", handle )
        return func
return decorator
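# Hedged illustration with readable names: both decorators above only append
# keys to a `handle_key` attribute on the wrapped function, which the
# KeyHandler metaclass below collects into a dispatch table.
def _demo_mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func
    return decorator

@_demo_mark("q")
@_demo_mark("\x1b")  # ESC; applied first, since decorators compose bottom-up
def _demo_quit():
    return "quit"

assert _demo_quit.handle_key == ["\x1b", "q"]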
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
def __new__( cls : Tuple , snake_case : Optional[Any] , snake_case : List[Any] , snake_case : List[Any] ):
'''simple docstring'''
A__ : Union[str, Any] = super().__new__(cls , snake_case , snake_case , snake_case )
if not hasattr(snake_case , """key_handler""" ):
setattr(snake_case , """key_handler""" , {} )
setattr(snake_case , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
A__ : Tuple = getattr(snake_case , """handle_key""" , [] )
for key in handled_keys:
A__ : str = value
return new_cls
@staticmethod
def _UpperCamelCase ( cls : str ):
'''simple docstring'''
A__ : Tuple = get_character()
if char != KEYMAP["undefined"]:
A__ : int = ord(snake_case )
A__ : Optional[Any] = cls.key_handler.get(snake_case )
if handler:
A__ : Union[str, Any] = char
return handler(cls )
else:
return None
def _lowerCAmelCase ( cls : List[str] ) ->Tuple:
return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
| 296
|
"""simple docstring"""
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[str] ):
'''simple docstring'''
A__ : Optional[int] = (0, 0)
A__ : Dict = None
A__ : int = 0
A__ : str = 0
A__ : Optional[Any] = 0
def __eq__( self : str , snake_case : Optional[int] ):
'''simple docstring'''
return self.position == cell.position
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
print(self.position )
class __SCREAMING_SNAKE_CASE :
def __init__( self : int , snake_case : Any=(5, 5) ):
'''simple docstring'''
A__ : Optional[int] = np.zeros(snake_case )
A__ : List[Any] = world_size[0]
A__ : Dict = world_size[1]
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
print(self.w )
def _UpperCamelCase ( self : Optional[int] , snake_case : List[Any] ):
'''simple docstring'''
A__ : int = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
A__ : int = cell.position[0]
A__ : str = cell.position[1]
A__ : Any = []
for n in neughbour_cord:
A__ : List[Any] = current_x + n[0]
A__ : Tuple = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
A__ : List[Any] = Cell()
A__ : str = (x, y)
A__ : Optional[Any] = cell
neighbours.append(snake_case )
return neighbours
def _lowerCAmelCase ( UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict ) ->Dict:
A__ : Union[str, Any] = []
A__ : Optional[int] = []
_open.append(UpperCAmelCase__ )
while _open:
A__ : List[Any] = np.argmin([n.f for n in _open] )
A__ : Union[str, Any] = _open[min_f]
_closed.append(_open.pop(UpperCAmelCase__ ) )
if current == goal:
break
for n in world.get_neigbours(UpperCAmelCase__ ):
for c in _closed:
if c == n:
continue
A__ : Dict = current.g + 1
            xa , ya = n.position
            xb , yb = goal.position
            A__ : Union[str, Any] = (yb - ya) ** 2 + (xb - xa) ** 2
A__ : Optional[int] = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(UpperCAmelCase__ )
A__ : List[str] = []
while current.parent is not None:
path.append(current.position )
A__ : Union[str, Any] = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
A_ = Gridworld()
# Start position and goal
A_ = Cell()
A_ = (0, 0)
A_ = Cell()
A_ = (4, 4)
print(F'path from {start.position} to {goal.position}')
A_ = astar(world, start, goal)
# Just for visual reasons.
for i in s:
A_ = 1
print(world.w)
| 296
| 1
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = 'git_vision_model'
def __init__( self : Union[str, Any] , snake_case : Any=768 , snake_case : Optional[int]=3072 , snake_case : Tuple=12 , snake_case : Any=12 , snake_case : Dict=3 , snake_case : Dict=224 , snake_case : List[str]=16 , snake_case : Optional[int]="quick_gelu" , snake_case : Optional[int]=1e-5 , snake_case : Tuple=0.0 , snake_case : Optional[int]=0.02 , **snake_case : Optional[Any] , ):
'''simple docstring'''
super().__init__(**snake_case )
A__ : Dict = hidden_size
A__ : Optional[Any] = intermediate_size
A__ : List[str] = num_hidden_layers
A__ : Optional[Any] = num_attention_heads
A__ : Any = num_channels
A__ : Tuple = patch_size
A__ : Any = image_size
A__ : str = initializer_range
A__ : Any = attention_dropout
A__ : List[str] = layer_norm_eps
A__ : Tuple = hidden_act
@classmethod
def _UpperCamelCase ( cls : str , snake_case : Union[str, os.PathLike] , **snake_case : Any ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case )
A__ , A__ : Any = cls.get_config_dict(snake_case , **snake_case )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
A__ : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case , **snake_case )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = 'git'
def __init__( self : Any , snake_case : Tuple=None , snake_case : int=3_0522 , snake_case : Optional[int]=768 , snake_case : int=6 , snake_case : int=12 , snake_case : Optional[Any]=3072 , snake_case : Dict="gelu" , snake_case : str=0.1 , snake_case : int=0.1 , snake_case : Dict=1024 , snake_case : Optional[Any]=0.02 , snake_case : Optional[int]=1e-12 , snake_case : int=0 , snake_case : int="absolute" , snake_case : List[str]=True , snake_case : List[str]=False , snake_case : List[str]=101 , snake_case : int=102 , snake_case : Tuple=None , **snake_case : Optional[Any] , ):
'''simple docstring'''
super().__init__(bos_token_id=snake_case , eos_token_id=snake_case , pad_token_id=snake_case , **snake_case )
if vision_config is None:
A__ : Any = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
A__ : Union[str, Any] = GitVisionConfig(**snake_case )
A__ : Any = vocab_size
A__ : Optional[int] = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Dict = num_attention_heads
A__ : List[Any] = hidden_act
A__ : Tuple = intermediate_size
A__ : str = hidden_dropout_prob
A__ : List[Any] = attention_probs_dropout_prob
A__ : Any = max_position_embeddings
A__ : Union[str, Any] = initializer_range
A__ : Union[str, Any] = layer_norm_eps
A__ : Dict = position_embedding_type
A__ : List[str] = use_cache
A__ : List[Any] = tie_word_embeddings
A__ : List[str] = num_image_with_embedding
A__ : str = bos_token_id
A__ : int = eos_token_id
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Union[str, Any] = copy.deepcopy(self.__dict__ )
A__ : Union[str, Any] = self.vision_config.to_dict()
A__ : str = self.__class__.model_type
return output
| 296
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple=False ) ->str:
A__ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
A__ : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Any]=False ) ->str:
for i in range(config.num_hidden_layers ):
if base_model:
A__ : Any = """"""
else:
A__ : Tuple = """deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : Any = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
A__ : Tuple = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A__ : List[Any] = in_proj_weight[
: config.hidden_size, :
]
A__ : str = in_proj_bias[: config.hidden_size]
A__ : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
A__ : Any = in_proj_bias[-config.hidden_size :]
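# Hedged illustration of the fused-QKV split performed above (illustrative
# sizes): timm stores a single (3 * hidden_size, hidden_size) projection, and
# the row blocks [0:h], [h:2h], [2h:3h] are the query, key and value weights.
_h = 4
_qkv = torch.arange(3 * _h * _h, dtype=torch.float32).reshape(3 * _h, _h)
_q, _k, _v = _qkv[: _h, :], _qkv[_h : 2 * _h, :], _qkv[-_h :, :]
assert _q.shape == _k.shape == _v.shape == (_h, _h)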
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Union[str, Any] ) ->Any:
A__ : int = dct.pop(UpperCAmelCase__ )
A__ : Tuple = val
def _lowerCAmelCase ( ) ->List[Any]:
A__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : int = Image.open(requests.get(UpperCAmelCase__, stream=UpperCAmelCase__ ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any ) ->Tuple:
A__ : List[Any] = DeiTConfig()
# all deit models have fine-tuned heads
A__ : Tuple = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
A__ : str = 1_0_0_0
A__ : List[str] = """huggingface/label-files"""
A__ : Dict = """imagenet-1k-id2label.json"""
A__ : List[str] = json.load(open(hf_hub_download(UpperCAmelCase__, UpperCAmelCase__, repo_type="""dataset""" ), """r""" ) )
A__ : Dict = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
A__ : Optional[int] = idalabel
A__ : Dict = {v: k for k, v in idalabel.items()}
A__ : List[str] = int(deit_name[-6:-4] )
A__ : str = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("""tiny""" ):
A__ : List[str] = 1_9_2
A__ : int = 7_6_8
A__ : List[Any] = 1_2
A__ : Dict = 3
elif deit_name[9:].startswith("""small""" ):
A__ : List[Any] = 3_8_4
A__ : List[str] = 1_5_3_6
A__ : Any = 1_2
A__ : Union[str, Any] = 6
if deit_name[9:].startswith("""base""" ):
pass
elif deit_name[4:].startswith("""large""" ):
A__ : int = 1_0_2_4
A__ : str = 4_0_9_6
A__ : Any = 2_4
A__ : int = 1_6
# load original model from timm
A__ : Dict = timm.create_model(UpperCAmelCase__, pretrained=UpperCAmelCase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A__ : Tuple = timm_model.state_dict()
A__ : str = create_rename_keys(UpperCAmelCase__, UpperCAmelCase__ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
read_in_q_k_v(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# load HuggingFace model
A__ : str = DeiTForImageClassificationWithTeacher(UpperCAmelCase__ ).eval()
model.load_state_dict(UpperCAmelCase__ )
# Check outputs on an image, prepared by DeiTImageProcessor
A__ : int = int(
(2_5_6 / 2_2_4) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
A__ : Any = DeiTImageProcessor(size=UpperCAmelCase__, crop_size=config.image_size )
A__ : Union[str, Any] = image_processor(images=prepare_img(), return_tensors="""pt""" )
A__ : Optional[Any] = encoding["""pixel_values"""]
A__ : Union[str, Any] = model(UpperCAmelCase__ )
A__ : Union[str, Any] = timm_model(UpperCAmelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCAmelCase__, outputs.logits, atol=1e-3 )
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
print(f'Saving model {deit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCAmelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
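# A minimal standalone sketch of the fused-qkv split performed in read_in_q_k_v above:
# timm stores query, key and value stacked along dim 0 of a single projection matrix.
# The hidden size of 4 below is a toy value, not a real DeiT dimension.
import torch

hidden_size = 4
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
in_proj_bias = torch.arange(3 * hidden_size, dtype=torch.float32)

q_w = in_proj_weight[:hidden_size, :]                    # first block -> query
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]   # middle block -> key
v_w = in_proj_weight[-hidden_size:, :]                   # last block -> value
q_b = in_proj_bias[:hidden_size]
k_b = in_proj_bias[hidden_size : 2 * hidden_size]
v_b = in_proj_bias[-hidden_size:]

# the three blocks partition the fused projection exactly
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b]), in_proj_bias)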
| 296
| 1
|
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _lowerCAmelCase ( ) ->str:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
A__ : Any = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching, """os.path.join""", UpperCAmelCase__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _lowerCAmelCase ( ) ->Any:
assert _test_patching.open is open
A__ : int = """__test_patch_submodule_builtin_mock__"""
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching, """open""", UpperCAmelCase__ ):
assert _test_patching.open is mock
# check that everything is back to normal when the patch is over
assert _test_patching.open is open
def _lowerCAmelCase ( ) ->Optional[Any]:
# pandas.read_csv is not present in _test_patching
A__ : Tuple = """__test_patch_submodule_missing_mock__"""
with patch_submodule(_test_patching, """pandas.read_csv""", UpperCAmelCase__ ):
pass
def _lowerCAmelCase ( ) ->Any:
# builtins should always be mocked even if they're not in the globals,
# in case they're loaded at some point
A__ : Optional[int] = """__test_patch_submodule_missing_builtin_mock__"""
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching, """len""", UpperCAmelCase__ ) is None
with patch_submodule(_test_patching, """len""", UpperCAmelCase__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _lowerCAmelCase ( ) ->int:
A__ : Dict = """__test_patch_submodule_start_and_stop_mock__"""
A__ : Tuple = patch_submodule(_test_patching, """open""", UpperCAmelCase__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _lowerCAmelCase ( ) ->Optional[int]:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
A__ : int = """__test_patch_submodule_successive_join__"""
A__ : Any = """__test_patch_submodule_successive_dirname__"""
A__ : List[str] = """__test_patch_submodule_successive_rename__"""
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching, """os.path.join""", UpperCAmelCase__ ):
with patch_submodule(_test_patching, """os.rename""", UpperCAmelCase__ ):
with patch_submodule(_test_patching, """os.path.dirname""", UpperCAmelCase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching, """os.rename""", UpperCAmelCase__ ):
with patch_submodule(_test_patching, """os.path.join""", UpperCAmelCase__ ):
with patch_submodule(_test_patching, """os.path.dirname""", UpperCAmelCase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _lowerCAmelCase ( ) ->Dict:
A__ : Tuple = """__test_patch_submodule_doesnt_exist_mock__"""
with patch_submodule(_test_patching, """__module_that_doesn_exist__.__attribute_that_doesn_exist__""", UpperCAmelCase__ ):
pass
with patch_submodule(_test_patching, """os.__attribute_that_doesn_exist__""", UpperCAmelCase__ ):
pass
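# A minimal sketch of the attribute-swap idea these tests exercise; this is an
# illustrative stand-in, not the actual datasets.utils.patching implementation.
import types

class simple_patch:
    """Temporarily replace getattr(obj, name) with mock, restoring it afterwards."""

    def __init__(self, obj, name, mock):
        self.obj, self.name, self.mock = obj, name, mock

    def start(self):
        self.original = getattr(self.obj, self.name)
        setattr(self.obj, self.name, self.mock)

    def stop(self):
        setattr(self.obj, self.name, self.original)

    def __enter__(self):
        self.start()
        return self.mock

    def __exit__(self, *exc):
        self.stop()

target = types.SimpleNamespace(join=lambda a, b: a + "/" + b)
original = target.join
with simple_patch(target, "join", "__mock__"):
    assert target.join == "__mock__"   # patched inside the context
assert target.join is original         # restored when the patch is over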
| 296
|
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _lowerCAmelCase ( UpperCAmelCase__ : Sequence[float], UpperCAmelCase__ : int, UpperCAmelCase__ : int ) ->tuple[int | None, int | None, float]:
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
A__ : Optional[int] = (low + high) // 2
A__ , A__ , A__ : List[Any] = max_subarray(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
A__ , A__ , A__ : Union[str, Any] = max_subarray(UpperCAmelCase__, mid + 1, UpperCAmelCase__ )
A__ , A__ , A__ : Union[str, Any] = max_cross_sum(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def _lowerCAmelCase ( UpperCAmelCase__ : Sequence[float], UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : int ) ->tuple[int, int, float]:
A__ , A__ : Dict = float("""-inf""" ), -1
A__ , A__ : Optional[Any] = float("""-inf""" ), -1
A__ : int | float = 0
for i in range(UpperCAmelCase__, low - 1, -1 ):
summ += arr[i]
if summ > left_sum:
A__ : Optional[int] = summ
A__ : Union[str, Any] = i
A__ : Optional[Any] = 0
for i in range(mid + 1, high + 1 ):
summ += arr[i]
if summ > right_sum:
A__ : int = summ
A__ : Union[str, Any] = i
return max_left, max_right, (left_sum + right_sum)
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->float:
A__ : Union[str, Any] = [randint(1, UpperCAmelCase__ ) for _ in range(UpperCAmelCase__ )]
A__ : Any = time.time()
max_subarray(UpperCAmelCase__, 0, input_size - 1 )
A__ : List[Any] = time.time()
return end - start
def _lowerCAmelCase ( ) ->None:
A__ : List[Any] = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
A__ : Any = [time_max_subarray(UpperCAmelCase__ ) for input_size in input_sizes]
print("""No of Inputs\t\tTime Taken""" )
for input_size, runtime in zip(UpperCAmelCase__, UpperCAmelCase__ ):
print(UpperCAmelCase__, """\t\t""", UpperCAmelCase__ )
plt.plot(UpperCAmelCase__, UpperCAmelCase__ )
plt.xlabel("""Number of Inputs""" )
plt.ylabel("""Time taken in seconds""" )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
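# The divide-and-conquer max_subarray above runs in O(n log n). For contrast, a
# minimal sketch of Kadane's O(n) algorithm for the same maximum-subarray sum
# (sum only, without the index bookkeeping the version above provides):
def kadane(arr: list[float]) -> float:
    best = current = arr[0]
    for x in arr[1:]:
        current = max(x, current + x)  # extend the current run or restart at x
        best = max(best, current)
    return best

# classic CLRS example: the best subarray is [18, 20, -7, 12] with sum 43
nums = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
assert kadane(nums) == 43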
| 296
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCamelCase ):
snake_case_ = ['onnx']
def __init__( self : str , *snake_case : Optional[int] , **snake_case : int ):
'''simple docstring'''
requires_backends(self , ["""onnx"""] )
@classmethod
def _UpperCamelCase ( cls : int , *snake_case : Optional[int] , **snake_case : List[str] ):
'''simple docstring'''
requires_backends(cls , ["""onnx"""] )
@classmethod
def _UpperCamelCase ( cls : Any , *snake_case : Dict , **snake_case : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["""onnx"""] )
| 296
|
"""simple docstring"""
from __future__ import annotations
class __SCREAMING_SNAKE_CASE :
def __init__( self : Dict , snake_case : int ):
'''simple docstring'''
A__ : List[Any] = order
# a_{0} ... a_{k}
A__ : List[Any] = [1.0] + [0.0] * order
# b_{0} ... b_{k}
A__ : str = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
A__ : Union[str, Any] = [0.0] * self.order
# y[n-1] ... y[n-k]
A__ : List[str] = [0.0] * self.order
def _UpperCamelCase ( self : Optional[int] , snake_case : list[float] , snake_case : list[float] ):
'''simple docstring'''
if len(snake_case ) < self.order:
A__ : Any = [1.0, *a_coeffs]
if len(snake_case ) != self.order + 1:
A__ : str = (
F'Expected a_coeffs to have {self.order + 1} elements '
F'for {self.order}-order filter, got {len(snake_case )}'
)
raise ValueError(snake_case )
if len(snake_case ) != self.order + 1:
A__ : Union[str, Any] = (
F'Expected b_coeffs to have {self.order + 1} elements '
F'for {self.order}-order filter, got {len(snake_case )}'
)
raise ValueError(snake_case )
A__ : Dict = a_coeffs
A__ : Any = b_coeffs
def _UpperCamelCase ( self : List[str] , snake_case : float ):
'''simple docstring'''
A__ : str = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
A__ : Dict = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
A__ : Tuple = self.input_history[:-1]
A__ : int = self.output_history[:-1]
A__ : Dict = sample
A__ : Tuple = result
return result
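# A self-contained order-1 condensation of the direct-form I filter above, with
# hand-picked coefficients (a two-tap moving average); the coefficient values are
# illustrative, not taken from any particular filter design.
class FirstOrderIIR:
    """y[n] = (b0*x[n] + b1*x[n-1] - a1*y[n-1]) / a0"""

    def __init__(self, a_coeffs: list[float], b_coeffs: list[float]):
        self.a, self.b = a_coeffs, b_coeffs
        self.x_prev = 0.0  # x[n-1]
        self.y_prev = 0.0  # y[n-1]

    def process(self, sample: float) -> float:
        result = self.b[1] * self.x_prev - self.a[1] * self.y_prev
        result = (result + self.b[0] * sample) / self.a[0]
        self.x_prev, self.y_prev = sample, result
        return result

filt = FirstOrderIIR(a_coeffs=[1.0, 0.0], b_coeffs=[0.5, 0.5])
out = [filt.process(x) for x in (1.0, 1.0, 1.0, 0.0, 0.0)]
assert out == [0.5, 1.0, 1.0, 0.5, 0.0]  # step response of the two-tap average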
| 296
| 1
|
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
A_ = logging.get_logger(__name__)
A_ = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class __SCREAMING_SNAKE_CASE :
def __init__( self : Any , snake_case : str=None , **snake_case : Tuple ):
'''simple docstring'''
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
A__ : Optional[Any] = model
A__ : Optional[int] = kwargs.get("""model_save_dir""" , snake_case )
A__ : str = kwargs.get("""latest_model_name""" , snake_case )
def __call__( self : str , **snake_case : Optional[int] ):
'''simple docstring'''
A__ : List[Any] = {k: np.array(snake_case ) for k, v in kwargs.items()}
return self.model.run(snake_case , snake_case )
@staticmethod
def _UpperCamelCase ( snake_case : Union[str, Path] , snake_case : str=None , snake_case : Any=None ):
'''simple docstring'''
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
A__ : List[Any] = """CPUExecutionProvider"""
return ort.InferenceSession(snake_case , providers=[provider] , sess_options=snake_case )
def _UpperCamelCase ( self : Tuple , snake_case : Union[str, Path] , snake_case : Optional[str] = None , **snake_case : Dict ):
'''simple docstring'''
A__ : Optional[int] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
A__ : Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name )
A__ : Optional[Any] = Path(snake_case ).joinpath(snake_case )
try:
shutil.copyfile(snake_case , snake_case )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
A__ : List[str] = self.model_save_dir.joinpath(snake_case )
if src_path.exists():
A__ : Tuple = Path(snake_case ).joinpath(snake_case )
try:
shutil.copyfile(snake_case , snake_case )
except shutil.SameFileError:
pass
def _UpperCamelCase ( self : List[Any] , snake_case : Union[str, os.PathLike] , **snake_case : Optional[int] , ):
'''simple docstring'''
if os.path.isfile(snake_case ):
logger.error(F'Provided path ({save_directory}) should be a directory, not a file' )
return
os.makedirs(snake_case , exist_ok=snake_case )
# saving model weights/files
self._save_pretrained(snake_case , **snake_case )
@classmethod
def _UpperCamelCase ( cls : List[str] , snake_case : Union[str, Path] , snake_case : Optional[Union[bool, str, None]] = None , snake_case : Optional[Union[str, None]] = None , snake_case : bool = False , snake_case : Optional[str] = None , snake_case : Optional[str] = None , snake_case : Optional[str] = None , snake_case : Optional["ort.SessionOptions"] = None , **snake_case : str , ):
'''simple docstring'''
A__ : Optional[Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(snake_case ):
A__ : Dict = OnnxRuntimeModel.load_model(
os.path.join(snake_case , snake_case ) , provider=snake_case , sess_options=snake_case )
A__ : Any = Path(snake_case )
# load model from hub
else:
# download model
A__ : Union[str, Any] = hf_hub_download(
repo_id=snake_case , filename=snake_case , use_auth_token=snake_case , revision=snake_case , cache_dir=snake_case , force_download=snake_case , )
A__ : List[Any] = Path(snake_case ).parent
A__ : Optional[int] = Path(snake_case ).name
A__ : Any = OnnxRuntimeModel.load_model(snake_case , provider=snake_case , sess_options=snake_case )
return cls(model=snake_case , **snake_case )
@classmethod
def _UpperCamelCase ( cls : Optional[int] , snake_case : Union[str, Path] , snake_case : bool = True , snake_case : Optional[str] = None , snake_case : Optional[str] = None , **snake_case : Optional[Any] , ):
'''simple docstring'''
A__ : Dict = None
if len(str(snake_case ).split("""@""" ) ) == 2:
A__ , A__ : str = model_id.split("""@""" )
return cls._from_pretrained(
model_id=snake_case , revision=snake_case , cache_dir=snake_case , force_download=snake_case , use_auth_token=snake_case , **snake_case , )
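# A minimal sketch of the onnxruntime calls the wrapper above funnels through.
# The file name "model.onnx" and the input shape are placeholders; the real
# names and shapes come from whatever graph was exported.
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
input_name = sess.get_inputs()[0].name                 # declared graph input
dummy = np.zeros((1, 3, 224, 224), dtype=np.float32)   # assumed input shape
outputs = sess.run(None, {input_name: dummy})          # None -> return all outputs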
| 296
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , snake_case : Optional[Any] , snake_case : Tuple=13 , snake_case : Dict=7 , snake_case : Optional[int]=True , snake_case : Union[str, Any]=True , snake_case : Dict=True , snake_case : Any=True , snake_case : List[str]=99 , snake_case : str=64 , snake_case : Optional[int]=5 , snake_case : str=4 , snake_case : List[Any]=37 , snake_case : Optional[Any]="gelu" , snake_case : List[str]=0.1 , snake_case : str=0.1 , snake_case : Optional[int]=512 , snake_case : Dict=16 , snake_case : List[Any]=2 , snake_case : Optional[int]=0.02 , snake_case : Any=3 , snake_case : Union[str, Any]=4 , snake_case : Dict=None , ):
'''simple docstring'''
A__ : Tuple = parent
A__ : Union[str, Any] = batch_size
A__ : List[str] = seq_length
A__ : Optional[int] = is_training
A__ : Dict = use_input_mask
A__ : Any = use_token_type_ids
A__ : Optional[Any] = use_labels
A__ : List[str] = vocab_size
A__ : Optional[int] = hidden_size
A__ : Optional[Any] = num_hidden_layers
A__ : Any = num_attention_heads
A__ : List[Any] = intermediate_size
A__ : Optional[Any] = hidden_act
A__ : Optional[int] = hidden_dropout_prob
A__ : Tuple = attention_probs_dropout_prob
A__ : str = max_position_embeddings
A__ : List[str] = type_vocab_size
A__ : Union[str, Any] = type_sequence_label_size
A__ : List[Any] = initializer_range
A__ : Optional[int] = num_labels
A__ : Dict = num_choices
A__ : Dict = scope
A__ : List[Any] = vocab_size - 1
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : List[Any] = None
if self.use_input_mask:
A__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Union[str, Any] = None
if self.use_labels:
A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Tuple = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ , A__ , A__ , A__ : str = self.prepare_config_and_inputs()
A__ : Union[str, Any] = True
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self : Union[str, Any] , snake_case : Optional[int] , snake_case : List[str] , snake_case : int ):
'''simple docstring'''
A__ : Any = GPTNeoXModel(config=snake_case )
model.to(snake_case )
model.eval()
A__ : int = model(snake_case , attention_mask=snake_case )
A__ : Optional[int] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Union[str, Any] , snake_case : str , snake_case : Any , snake_case : Union[str, Any] ):
'''simple docstring'''
A__ : int = True
A__ : str = GPTNeoXModel(snake_case )
model.to(snake_case )
model.eval()
A__ : Tuple = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Dict , snake_case : List[Any] , snake_case : str , snake_case : Optional[Any] , snake_case : Any ):
'''simple docstring'''
A__ : Any = GPTNeoXForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Tuple = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : List[str] , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : Tuple ):
'''simple docstring'''
A__ : int = self.num_labels
A__ : int = GPTNeoXForQuestionAnswering(snake_case )
model.to(snake_case )
model.eval()
A__ : Optional[Any] = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : str , snake_case : Tuple , snake_case : int , snake_case : int , snake_case : Dict ):
'''simple docstring'''
A__ : List[Any] = self.num_labels
A__ : Tuple = GPTNeoXForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : List[str] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : Any , snake_case : Union[str, Any] , snake_case : int , snake_case : Tuple , snake_case : Any ):
'''simple docstring'''
A__ : Tuple = self.num_labels
A__ : Any = GPTNeoXForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : Dict = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : List[str] , snake_case : List[str] , snake_case : Tuple , snake_case : Any ):
'''simple docstring'''
A__ : Optional[int] = True
A__ : Any = GPTNeoXForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
# first forward pass
A__ : Tuple = model(snake_case , attention_mask=snake_case , use_cache=snake_case )
A__ : str = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
A__ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
A__ : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
A__ : Tuple = model(snake_case , attention_mask=snake_case , output_hidden_states=snake_case )
A__ : List[Any] = output_from_no_past["""hidden_states"""][0]
A__ : List[str] = model(
snake_case , attention_mask=snake_case , past_key_values=snake_case , output_hidden_states=snake_case , )["""hidden_states"""][0]
# select random slice
A__ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : str = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ : Dict = config_and_inputs
A__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
snake_case_ = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (GPTNeoXForCausalLM,) if is_torch_available() else ()
snake_case_ = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : Any = GPTNeoXModelTester(self )
A__ : Any = ConfigTester(self , config_class=snake_case , hidden_size=64 , num_attention_heads=8 )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ , A__ , A__ , A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(snake_case , snake_case , snake_case )
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ , A__ , A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(snake_case , snake_case , snake_case )
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ , A__ , A__ , A__ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
A__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(snake_case , snake_case , snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ , A__ , A__ , A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(snake_case , snake_case , snake_case )
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*snake_case )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Optional[Any] ):
'''simple docstring'''
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
A__ : List[Any] = ids_tensor([1, 10] , config.vocab_size )
A__ : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A__ : Union[str, Any] = GPTNeoXModel(snake_case )
original_model.to(snake_case )
original_model.eval()
A__ : Optional[int] = original_model(snake_case ).last_hidden_state
A__ : List[str] = original_model(snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A__ : Optional[int] = {"""type""": scaling_type, """factor""": 10.0}
A__ : Optional[int] = GPTNeoXModel(snake_case )
scaled_model.to(snake_case )
scaled_model.eval()
A__ : List[str] = scaled_model(snake_case ).last_hidden_state
A__ : Tuple = scaled_model(snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Any = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
A__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(snake_case )
A__ : Optional[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(snake_case )
# The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
A__ : Union[str, Any] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
A__ : Tuple = model.generate(**snake_case , do_sample=snake_case , max_new_tokens=20 )
A__ : Tuple = tokenizer.batch_decode(snake_case )[0]
self.assertEqual(snake_case , snake_case )
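# A minimal sketch of the rope_scaling knob the parameterized test above toggles
# (available on GPTNeoXConfig in recent transformers versions); this only builds
# the config, no weights are loaded.
from transformers import GPTNeoXConfig

config = GPTNeoXConfig(hidden_size=64, num_attention_heads=8)
config.rope_scaling = {"type": "linear", "factor": 10.0}  # or {"type": "dynamic", "factor": 10.0}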
| 296
| 1
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
A_ = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def _lowerCAmelCase ( UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[int] ) ->Dict:
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple ) ->str:
A__ : Dict = _TestCommandArgs(dataset=UpperCAmelCase__, all_configs=UpperCAmelCase__, save_infos=UpperCAmelCase__ )
A__ : Optional[int] = TestCommand(*UpperCAmelCase__ )
test_command.run()
A__ : int = os.path.join(UpperCAmelCase__, """README.md""" )
assert os.path.exists(UpperCAmelCase__ )
A__ : Tuple = DatasetInfosDict.from_directory(UpperCAmelCase__ )
A__ : str = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ), splits=[
{
"""name""": """train""",
"""num_bytes""": 2_3_5_1_5_6_3,
"""num_examples""": 1_0_0_0_0,
},
{
"""name""": """validation""",
"""num_bytes""": 2_3_8_4_1_8,
"""num_examples""": 1_0_0_0,
},
], download_size=3_9_4_0_6_8_0, dataset_size=2_5_8_9_9_8_1, )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
A__ , A__ : List[str] = getattr(dataset_infos["""default"""], UpperCAmelCase__ ), getattr(expected_dataset_infos["""default"""], UpperCAmelCase__ )
if key == "num_bytes":
assert is_apercent_close(UpperCAmelCase__, UpperCAmelCase__ )
elif key == "splits":
assert list(UpperCAmelCase__ ) == list(UpperCAmelCase__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes )
else:
assert result == expected
| 296
|
"""simple docstring"""
from collections import defaultdict
from math import gcd
def _lowerCAmelCase ( UpperCAmelCase__ : int = 1_5_0_0_0_0_0 ) ->int:
A__ : defaultdict = defaultdict(UpperCAmelCase__ )
A__ : Any = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1, UpperCAmelCase__, 2 ):
if gcd(UpperCAmelCase__, UpperCAmelCase__ ) > 1:
continue
A__ : str = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(UpperCAmelCase__, limit + 1, UpperCAmelCase__ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F'{solution() = }')
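# The solution relies on Euclid's parametrization: for coprime m > n > 0 of opposite
# parity, (m^2 - n^2, 2mn, m^2 + n^2) is a primitive Pythagorean triple with
# perimeter 2m(m + n). A quick standalone check:
from math import gcd

def primitive_triple(m: int, n: int) -> tuple[int, int, int]:
    assert m > n > 0 and gcd(m, n) == 1 and (m - n) % 2 == 1
    return (m * m - n * n, 2 * m * n, m * m + n * n)

a, b, c = primitive_triple(2, 1)      # the classic (3, 4, 5)
assert (a, b, c) == (3, 4, 5) and a * a + b * b == c * c
assert a + b + c == 2 * 2 * (2 + 1)   # perimeter = 2 * m * (m + n) = 12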
| 296
| 1
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = field(default=UpperCamelCase , metadata={'help': 'Whether to use SortishSampler or not.'} )
snake_case_ = field(
default=UpperCamelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
snake_case_ = field(
default=UpperCamelCase , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
snake_case_ = field(
default=UpperCamelCase , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
snake_case_ = field(
default=UpperCamelCase , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : str = super().to_dict()
for k, v in d.items():
if isinstance(snake_case , snake_case ):
A__ : Any = v.to_dict()
return d
| 296
|
"""simple docstring"""
import os
from distutils.util import strtobool
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Optional[Any] ) ->List[str]:
for e in env_keys:
A__ : List[Any] = int(os.environ.get(UpperCAmelCase__, -1 ) )
if val >= 0:
return val
return default
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : str=False ) ->List[str]:
A__ : List[Any] = os.environ.get(UpperCAmelCase__, str(UpperCAmelCase__ ) )
return strtobool(UpperCAmelCase__ ) == 1 # As its name indicates `strtobool` actually returns an int...
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Any]="no" ) ->int:
A__ : str = os.environ.get(UpperCAmelCase__, str(UpperCAmelCase__ ) )
return value
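# A small self-contained sketch of the env-flag pattern above; the variable name
# MY_DEBUG is made up for the demo.
import os
from distutils.util import strtobool

os.environ["MY_DEBUG"] = "yes"  # strtobool accepts y/yes/t/true/on/1 and their negatives
assert strtobool(os.environ.get("MY_DEBUG", "false")) == 1  # returns int 1/0, not bool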
| 296
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _lowerCAmelCase ( UpperCAmelCase__ : np.ndarray, UpperCAmelCase__ : np.ndarray ) ->float:
return math.sqrt(sum(pow(a - b, 2 ) for a, b in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
def _lowerCAmelCase ( UpperCAmelCase__ : np.ndarray, UpperCAmelCase__ : np.ndarray ) ->list[list[list[float] | float]]:
if dataset.ndim != value_array.ndim:
A__ : Optional[Any] = (
"""Wrong input data's dimensions... """
f'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
)
raise ValueError(UpperCAmelCase__ )
try:
if dataset.shape[1] != value_array.shape[1]:
A__ : Optional[Any] = (
"""Wrong input data's shape... """
f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
)
raise ValueError(UpperCAmelCase__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("""Wrong shape""" )
if dataset.dtype != value_array.dtype:
A__ : Union[str, Any] = (
"""Input data have different datatype... """
f'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
)
raise TypeError(UpperCAmelCase__ )
A__ : Dict = []
for value in value_array:
A__ : str = euclidean(UpperCAmelCase__, dataset[0] )
A__ : List[str] = dataset[0].tolist()
for dataset_value in dataset[1:]:
A__ : Optional[int] = euclidean(UpperCAmelCase__, UpperCAmelCase__ )
if dist > temp_dist:
A__ : int = temp_dist
A__ : Dict = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _lowerCAmelCase ( UpperCAmelCase__ : np.ndarray, UpperCAmelCase__ : np.ndarray ) ->float:
return np.dot(UpperCAmelCase__, UpperCAmelCase__ ) / (norm(UpperCAmelCase__ ) * norm(UpperCAmelCase__ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
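# A quick numpy check of both metrics used above (Euclidean nearest neighbour and
# cosine similarity), calling numpy directly rather than the placeholder-named
# functions in this file:
import numpy as np
from numpy.linalg import norm

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
query = np.array([0.9, 1.2])

dists = norm(dataset - query, axis=1)     # Euclidean distance to every row
assert int(dists.argmin()) == 1           # nearest neighbour is [1.0, 1.0]

u, v = np.array([1.0, 0.0]), np.array([1.0, 1.0])
cos = np.dot(u, v) / (norm(u) * norm(v))  # cosine similarity
assert abs(cos - 1 / np.sqrt(2)) < 1e-12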
| 296
|
"""simple docstring"""
import cva
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , snake_case : float , snake_case : int ):
'''simple docstring'''
if k in (0.04, 0.06):
A__ : Optional[int] = k
A__ : int = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : List[Any] ):
'''simple docstring'''
return str(self.k )
def _UpperCamelCase ( self : int , snake_case : str ):
'''simple docstring'''
A__ : List[str] = cva.imread(snake_case , 0 )
A__ , A__ : Union[str, Any] = img.shape
A__ : list[list[int]] = []
A__ : Optional[Any] = img.copy()
A__ : List[str] = cva.cvtColor(snake_case , cva.COLOR_GRAY2RGB )
A__ , A__ : List[Any] = np.gradient(snake_case )
A__ : List[Any] = dx**2
A__ : Any = dy**2
A__ : Dict = dx * dy
A__ : Any = 0.04
A__ : Optional[Any] = self.window_size // 2
for y in range(snake_case , h - offset ):
for x in range(snake_case , w - offset ):
A__ : List[str] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
A__ : Tuple = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
A__ : Optional[int] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
A__ : int = (wxx * wyy) - (wxy**2)
A__ : Any = wxx + wyy
A__ : List[str] = det - k * (trace**2)
# response threshold; tune to keep more or fewer corners
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
A_ = HarrisCorner(0.04, 3)
A_ , A_ = edge_detect.detect('''path_to_image''')
cva.imwrite('''detect.png''', color_img)
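# For each pixel the detector sums Ix^2, Iy^2 and Ix*Iy over a window to form the
# structure tensor, then scores r = det - k * trace^2. A toy one-window sketch with
# made-up gradients (k = 0.04 as above):
import numpy as np

ix = np.array([[1.0, 1.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])  # toy x-gradients
iy = ix.T                                                           # toy y-gradients

wxx, wyy, wxy = (ix * ix).sum(), (iy * iy).sum(), (ix * iy).sum()
k = 0.04
r = (wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2
assert r > 0.5  # same threshold the detector uses to keep a corner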
| 296
| 1
|
"""simple docstring"""
import qiskit
def _lowerCAmelCase ( UpperCAmelCase__ : int, UpperCAmelCase__ : int ) ->qiskit.result.counts.Counts:
A__ : Optional[Any] = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
A__ : List[Any] = qiskit.QuantumCircuit(UpperCAmelCase__, UpperCAmelCase__ )
# Map the quantum measurement to the classical bits
circuit.measure([0], [0] )
# Execute the circuit on the simulator
A__ : int = qiskit.execute(UpperCAmelCase__, UpperCAmelCase__, shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(UpperCAmelCase__ )
if __name__ == "__main__":
print(F'Total count for various states are: {single_qubit_measure(1, 1)}')
| 296
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
A_ = logging.get_logger(__name__)
A_ = Dict[str, Any]
A_ = List[Prediction]
@add_end_docstrings(UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
def __init__( self : str , *snake_case : Tuple , **snake_case : Tuple ):
'''simple docstring'''
super().__init__(*snake_case , **snake_case )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , """vision""" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _UpperCamelCase ( self : List[Any] , **snake_case : Optional[int] ):
'''simple docstring'''
A__ : Dict = {}
if "threshold" in kwargs:
A__ : int = kwargs["""threshold"""]
return {}, {}, postprocess_kwargs
def __call__( self : Tuple , *snake_case : Union[str, Any] , **snake_case : Union[str, Any] ):
'''simple docstring'''
return super().__call__(*snake_case , **snake_case )
def _UpperCamelCase ( self : str , snake_case : int ):
'''simple docstring'''
A__ : List[str] = load_image(snake_case )
A__ : int = torch.IntTensor([[image.height, image.width]] )
A__ : Union[str, Any] = self.image_processor(images=[image] , return_tensors="""pt""" )
if self.tokenizer is not None:
A__ : str = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
A__ : List[str] = target_size
return inputs
def _UpperCamelCase ( self : Optional[int] , snake_case : List[Any] ):
'''simple docstring'''
A__ : str = model_inputs.pop("""target_size""" )
A__ : Dict = self.model(**snake_case )
A__ : Optional[Any] = outputs.__class__({"""target_size""": target_size, **outputs} )
if self.tokenizer is not None:
A__ : str = model_inputs["""bbox"""]
return model_outputs
def _UpperCamelCase ( self : Tuple , snake_case : Optional[int] , snake_case : int=0.9 ):
'''simple docstring'''
A__ : Any = model_outputs["""target_size"""]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
A__ , A__ : Tuple = target_size[0].tolist()
def unnormalize(snake_case : Optional[int] ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
A__ , A__ : Optional[int] = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
A__ : Optional[Any] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
A__ : List[str] = [unnormalize(snake_case ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
A__ : Tuple = ["""score""", """label""", """box"""]
A__ : Any = [dict(zip(snake_case , snake_case ) ) for vals in zip(scores.tolist() , snake_case , snake_case ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
A__ : Union[str, Any] = self.image_processor.post_process_object_detection(snake_case , snake_case , snake_case )
A__ : str = raw_annotations[0]
A__ : str = raw_annotation["""scores"""]
A__ : List[Any] = raw_annotation["""labels"""]
A__ : int = raw_annotation["""boxes"""]
A__ : str = scores.tolist()
A__ : Any = [self.model.config.idalabel[label.item()] for label in labels]
A__ : int = [self._get_bounding_box(snake_case ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
A__ : str = ["""score""", """label""", """box"""]
A__ : Dict = [
dict(zip(snake_case , snake_case ) )
for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
]
return annotation
def _UpperCamelCase ( self : Union[str, Any] , snake_case : "torch.Tensor" ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
A__ , A__ , A__ , A__ : Any = box.int().tolist()
A__ : Any = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
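# A minimal usage sketch via the public pipeline factory (facebook/detr-resnet-50
# is one published checkpoint, used here only as an example):
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
# each entry: {"score": float, "label": str, "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}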
| 296
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
A_ = None
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
A_ = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
A_ = {
'''google/rembert''': 256,
}
A_ = '''▁'''
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = RemBertTokenizer
def __init__( self : Tuple , snake_case : List[str]=None , snake_case : List[Any]=None , snake_case : Tuple=True , snake_case : Tuple=True , snake_case : Optional[int]=False , snake_case : Optional[Any]="[CLS]" , snake_case : Optional[int]="[SEP]" , snake_case : Optional[Any]="<unk>" , snake_case : Tuple="[SEP]" , snake_case : List[str]="<pad>" , snake_case : Optional[Any]="[CLS]" , snake_case : List[str]="[MASK]" , **snake_case : Dict , ):
'''simple docstring'''
A__ : Optional[Any] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
super().__init__(
snake_case , tokenizer_file=snake_case , do_lower_case=snake_case , remove_space=snake_case , keep_accents=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , **snake_case , )
A__ : Any = do_lower_case
A__ : Optional[Any] = remove_space
A__ : Tuple = keep_accents
A__ : str = vocab_file
A__ : Tuple = False if not self.vocab_file else True
def _UpperCamelCase ( self : Optional[int] , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
'''simple docstring'''
A__ : List[Any] = [self.sep_token_id]
A__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCamelCase ( self : Optional[int] , snake_case : List[int] , snake_case : Optional[List[int]] = None , snake_case : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1]
return [1] + ([0] * len(snake_case )) + [1]
def _UpperCamelCase ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
'''simple docstring'''
A__ : Union[str, Any] = [self.sep_token_id]
A__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self : List[str] , snake_case : str , snake_case : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error("""Vocabulary path ({}) should be a directory""".format(snake_case ) )
return
A__ : Tuple = os.path.join(
snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ):
copyfile(self.vocab_file , snake_case )
return (out_vocab_file,)
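# A tiny pure-Python check of the single/pair layouts the three builder methods
# above implement, [CLS] A [SEP] and [CLS] A [SEP] B [SEP]; the token ids are
# placeholders, not RemBERT's real special-token ids.
cls_id, sep_id = 101, 102
a, b = [7, 8, 9], [4, 5]

pair = [cls_id] + a + [sep_id] + b + [sep_id]
special_tokens_mask = [1] + [0] * len(a) + [1] + [0] * len(b) + [1]
token_type_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)

assert len(pair) == len(special_tokens_mask) == len(token_type_ids) == 8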
| 296
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A_ = logging.get_logger(__name__)
A_ = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = 'table-transformer'
snake_case_ = ['past_key_values']
snake_case_ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Dict , snake_case : int=True , snake_case : Dict=None , snake_case : Union[str, Any]=3 , snake_case : Dict=100 , snake_case : Tuple=6 , snake_case : Optional[int]=2048 , snake_case : int=8 , snake_case : Dict=6 , snake_case : Any=2048 , snake_case : str=8 , snake_case : Union[str, Any]=0.0 , snake_case : List[str]=0.0 , snake_case : List[str]=True , snake_case : Any="relu" , snake_case : str=256 , snake_case : int=0.1 , snake_case : Dict=0.0 , snake_case : str=0.0 , snake_case : Union[str, Any]=0.02 , snake_case : Union[str, Any]=1.0 , snake_case : Optional[Any]=False , snake_case : int="sine" , snake_case : Optional[Any]="resnet50" , snake_case : Optional[int]=True , snake_case : Any=False , snake_case : int=1 , snake_case : Tuple=5 , snake_case : Optional[int]=2 , snake_case : Tuple=1 , snake_case : Optional[Any]=1 , snake_case : Optional[Any]=5 , snake_case : Dict=2 , snake_case : Any=0.1 , **snake_case : Any , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A__ : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(snake_case , snake_case ):
A__ : Optional[int] = backbone_config.get("""model_type""" )
A__ : Optional[int] = CONFIG_MAPPING[backbone_model_type]
A__ : List[str] = config_class.from_dict(snake_case )
# set timm attributes to None
A__ , A__ , A__ : str = None, None, None
A__ : Tuple = use_timm_backbone
A__ : str = backbone_config
A__ : str = num_channels
A__ : List[Any] = num_queries
A__ : Optional[Any] = d_model
A__ : Tuple = encoder_ffn_dim
A__ : Union[str, Any] = encoder_layers
A__ : List[Any] = encoder_attention_heads
A__ : Optional[int] = decoder_ffn_dim
A__ : Any = decoder_layers
A__ : int = decoder_attention_heads
A__ : Any = dropout
A__ : Dict = attention_dropout
A__ : Dict = activation_dropout
A__ : Tuple = activation_function
A__ : List[str] = init_std
A__ : List[str] = init_xavier_std
A__ : Any = encoder_layerdrop
A__ : Optional[Any] = decoder_layerdrop
A__ : Union[str, Any] = encoder_layers
A__ : Dict = auxiliary_loss
A__ : List[Any] = position_embedding_type
A__ : Optional[Any] = backbone
A__ : str = use_pretrained_backbone
A__ : Union[str, Any] = dilation
# Hungarian matcher
A__ : Tuple = class_cost
A__ : Optional[Any] = bbox_cost
A__ : Dict = giou_cost
# Loss coefficients
A__ : Any = mask_loss_coefficient
A__ : str = dice_loss_coefficient
A__ : str = bbox_loss_coefficient
A__ : Union[str, Any] = giou_loss_coefficient
A__ : List[str] = eos_coefficient
super().__init__(is_encoder_decoder=snake_case , **snake_case )
@property
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
return self.d_model
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = version.parse('1.11' )
@property
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
return 1e-5
@property
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
return 12
| 296
| 1
|
"""simple docstring"""
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->bool:
A__ : Union[str, Any] = (1 + 2_4 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def _lowerCAmelCase ( UpperCAmelCase__ : int = 5_0_0_0 ) ->int:
A__ : Optional[Any] = [(i * (3 * i - 1)) // 2 for i in range(1, UpperCAmelCase__ )]
for i, pentagonal_i in enumerate(UpperCAmelCase__ ):
for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ):
A__ : Any = pentagonal_nums[j]
A__ : Optional[Any] = pentagonal_i + pentagonal_j
A__ : List[str] = pentagonal_j - pentagonal_i
if is_pentagonal(UpperCAmelCase__ ) and is_pentagonal(UpperCAmelCase__ ):
return b
return -1
if __name__ == "__main__":
print(F'{solution() = }')
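# is_pentagonal inverts P(n) = n * (3n - 1) / 2: x is pentagonal iff
# n = (1 + sqrt(1 + 24x)) / 6 is a positive integer. A quick standalone check:
def pentagonal(n: int) -> int:
    return n * (3 * n - 1) // 2

assert [pentagonal(n) for n in range(1, 6)] == [1, 5, 12, 22, 35]

def is_pentagonal(x: int) -> bool:
    root = (1 + 24 * x) ** 0.5
    return ((1 + root) / 6) % 1 == 0

assert is_pentagonal(22) and not is_pentagonal(23)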
| 296
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = 'Salesforce/blip-image-captioning-base'
snake_case_ = (
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
snake_case_ = 'image_captioner'
snake_case_ = AutoModelForVisionaSeq
snake_case_ = ['image']
snake_case_ = ['text']
def __init__( self : int , *snake_case : Optional[int] , **snake_case : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*snake_case , **snake_case )
def _UpperCamelCase ( self : int , snake_case : "Image" ):
'''simple docstring'''
return self.pre_processor(images=snake_case , return_tensors="""pt""" )
def _UpperCamelCase ( self : int , snake_case : List[Any] ):
'''simple docstring'''
return self.model.generate(**snake_case )
def _UpperCamelCase ( self : Optional[int] , snake_case : Any ):
'''simple docstring'''
return self.pre_processor.batch_decode(snake_case , skip_special_tokens=snake_case )[0].strip()
| 296
| 1
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A_ = logging.get_logger(__name__)
A_ = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = 'detr'
snake_case_ = ['past_key_values']
snake_case_ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : List[Any] , snake_case : Optional[int]=True , snake_case : Dict=None , snake_case : Any=3 , snake_case : Optional[Any]=100 , snake_case : List[str]=6 , snake_case : Any=2048 , snake_case : Tuple=8 , snake_case : Any=6 , snake_case : Union[str, Any]=2048 , snake_case : str=8 , snake_case : Dict=0.0 , snake_case : List[Any]=0.0 , snake_case : Tuple=True , snake_case : List[str]="relu" , snake_case : str=256 , snake_case : List[Any]=0.1 , snake_case : int=0.0 , snake_case : Union[str, Any]=0.0 , snake_case : List[Any]=0.02 , snake_case : List[Any]=1.0 , snake_case : Optional[int]=False , snake_case : Optional[Any]="sine" , snake_case : List[str]="resnet50" , snake_case : Union[str, Any]=True , snake_case : str=False , snake_case : Union[str, Any]=1 , snake_case : int=5 , snake_case : Any=2 , snake_case : Tuple=1 , snake_case : Union[str, Any]=1 , snake_case : Union[str, Any]=5 , snake_case : List[Any]=2 , snake_case : int=0.1 , **snake_case : List[Any] , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A__ : Any = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(snake_case , snake_case ):
A__ : List[str] = backbone_config.get("""model_type""" )
A__ : Dict = CONFIG_MAPPING[backbone_model_type]
A__ : Optional[Any] = config_class.from_dict(snake_case )
# set timm attributes to None
A__ , A__ , A__ : Union[str, Any] = None, None, None
A__ : Dict = use_timm_backbone
A__ : Union[str, Any] = backbone_config
A__ : str = num_channels
A__ : int = num_queries
A__ : Optional[int] = d_model
A__ : List[Any] = encoder_ffn_dim
A__ : int = encoder_layers
A__ : Any = encoder_attention_heads
A__ : Union[str, Any] = decoder_ffn_dim
A__ : int = decoder_layers
A__ : Optional[int] = decoder_attention_heads
A__ : Dict = dropout
A__ : Union[str, Any] = attention_dropout
A__ : Dict = activation_dropout
A__ : Optional[int] = activation_function
A__ : List[Any] = init_std
A__ : str = init_xavier_std
A__ : Optional[Any] = encoder_layerdrop
A__ : List[Any] = decoder_layerdrop
A__ : List[str] = encoder_layers
A__ : str = auxiliary_loss
A__ : Tuple = position_embedding_type
A__ : Optional[Any] = backbone
A__ : Tuple = use_pretrained_backbone
A__ : List[Any] = dilation
# Hungarian matcher
A__ : List[Any] = class_cost
A__ : int = bbox_cost
A__ : List[Any] = giou_cost
# Loss coefficients
A__ : Dict = mask_loss_coefficient
A__ : Tuple = dice_loss_coefficient
A__ : int = bbox_loss_coefficient
A__ : int = giou_loss_coefficient
A__ : Tuple = eos_coefficient
super().__init__(is_encoder_decoder=snake_case , **snake_case )
@property
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def _UpperCamelCase ( self : str ):
'''simple docstring'''
return self.d_model
@classmethod
def _UpperCamelCase ( cls : Optional[int] , snake_case : PretrainedConfig , **snake_case : Optional[Any] ):
'''simple docstring'''
return cls(backbone_config=snake_case , **snake_case )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : int = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
A__ : Dict = self.backbone_config.to_dict()
A__ : str = self.__class__.model_type
return output
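# A minimal usage sketch (hypothetical values, assuming the public
# `transformers.DetrConfig` API rather than the mangled class name above):
#
#   from transformers import DetrConfig
#   config = DetrConfig(num_queries=50, d_model=256)
#   config.hidden_size  # -> 256, resolved through the attribute_map above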
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = version.parse('1.11' )
@property
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
return 1e-5
@property
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 12
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : List[Any] ):
'''simple docstring'''
super().__init__()
A__ : int = nn.Linear(3 , 4 )
A__ : Union[str, Any] = nn.BatchNormad(4 )
A__ : Union[str, Any] = nn.Linear(4 , 5 )
def _UpperCamelCase ( self : str , snake_case : List[str] ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(snake_case ) ) )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : int = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(snake_case , model.state_dict() )
A__ : List[str] = os.path.join(snake_case , """index.json""" )
self.assertTrue(os.path.isfile(snake_case ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
A__ : List[str] = os.path.join(snake_case , F'{key}.dat' )
self.assertTrue(os.path.isfile(snake_case ) )
# TODO: add tests on the fact weights are properly loaded
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Union[str, Any] = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
A__ : str = torch.randn(2 , 3 , dtype=snake_case )
with TemporaryDirectory() as tmp_dir:
A__ : List[str] = offload_weight(snake_case , """weight""" , snake_case , {} )
A__ : Union[str, Any] = os.path.join(snake_case , """weight.dat""" )
self.assertTrue(os.path.isfile(snake_case ) )
self.assertDictEqual(snake_case , {"""weight""": {"""shape""": [2, 3], """dtype""": str(snake_case ).split(""".""" )[1]}} )
A__ : str = load_offloaded_weight(snake_case , index["""weight"""] )
self.assertTrue(torch.equal(snake_case , snake_case ) )
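# i.e. offload_weight serializes the tensor to "<name>.dat" and records its
# shape/dtype in the index, and load_offloaded_weight reads the file back
# into a tensor equal to the original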
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : str = ModelForTest()
A__ : Union[str, Any] = model.state_dict()
A__ : Optional[int] = {k: v for k, v in state_dict.items() if """linear2""" not in k}
A__ : List[Any] = {k: v for k, v in state_dict.items() if """linear2""" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(snake_case , snake_case )
A__ : Dict = OffloadedWeightsLoader(state_dict=snake_case , save_folder=snake_case )
# Every key is there with the right value
self.assertEqual(sorted(snake_case ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(snake_case , weight_map[key] ) )
A__ : int = {k: v for k, v in state_dict.items() if """weight""" in k}
A__ : Tuple = {k: v for k, v in state_dict.items() if """weight""" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(snake_case , snake_case )
A__ : Optional[Any] = OffloadedWeightsLoader(state_dict=snake_case , save_folder=snake_case )
# Every key is there with the right value
self.assertEqual(sorted(snake_case ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(snake_case , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(snake_case , snake_case )
# Duplicates are removed
A__ : int = OffloadedWeightsLoader(state_dict=snake_case , save_folder=snake_case )
# Every key is there with the right value
self.assertEqual(sorted(snake_case ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(snake_case , weight_map[key] ) )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : List[str] = {"""a.1""": 0, """a.10""": 1, """a.2""": 2}
A__ : str = extract_submodules_state_dict(snake_case , ["""a.1""", """a.2"""] )
self.assertDictEqual(snake_case , {"""a.1""": 0, """a.2""": 2} )
A__ : Dict = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2}
A__ : int = extract_submodules_state_dict(snake_case , ["""a.1""", """a.2"""] )
self.assertDictEqual(snake_case , {"""a.1.a""": 0, """a.2.a""": 2} )
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
A_ = logging.get_logger(__name__)
A_ = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
A_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
A_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = 'whisper'
snake_case_ = ['past_key_values']
snake_case_ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Any , snake_case : List[str]=5_1865 , snake_case : str=80 , snake_case : Dict=6 , snake_case : Union[str, Any]=4 , snake_case : str=6 , snake_case : int=4 , snake_case : List[str]=1536 , snake_case : Tuple=1536 , snake_case : int=0.0 , snake_case : List[str]=0.0 , snake_case : int=5_0257 , snake_case : Any=True , snake_case : str=True , snake_case : List[Any]="gelu" , snake_case : int=256 , snake_case : Union[str, Any]=0.0 , snake_case : Union[str, Any]=0.0 , snake_case : int=0.0 , snake_case : str=0.02 , snake_case : Tuple=False , snake_case : List[Any]=1500 , snake_case : List[str]=448 , snake_case : Optional[int]=5_0256 , snake_case : Tuple=5_0256 , snake_case : Union[str, Any]=5_0256 , snake_case : Dict=None , snake_case : Optional[int]=[220, 5_0256] , snake_case : int=False , snake_case : Any=256 , snake_case : List[str]=False , snake_case : List[str]=0.05 , snake_case : Tuple=10 , snake_case : int=2 , snake_case : Dict=0.0 , snake_case : str=10 , snake_case : int=0 , snake_case : Union[str, Any]=7 , **snake_case : Optional[int] , ):
'''simple docstring'''
A__ : Union[str, Any] = vocab_size
A__ : List[str] = num_mel_bins
A__ : Optional[int] = d_model
A__ : int = encoder_layers
A__ : Optional[Any] = encoder_attention_heads
A__ : Tuple = decoder_layers
A__ : Optional[int] = decoder_attention_heads
A__ : Optional[int] = decoder_ffn_dim
A__ : Tuple = encoder_ffn_dim
A__ : Optional[int] = dropout
A__ : str = attention_dropout
A__ : List[str] = activation_dropout
A__ : Optional[int] = activation_function
A__ : Any = init_std
A__ : Dict = encoder_layerdrop
A__ : Union[str, Any] = decoder_layerdrop
A__ : Optional[int] = use_cache
A__ : Union[str, Any] = encoder_layers
A__ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
A__ : int = max_source_positions
A__ : Any = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
A__ : Optional[Any] = classifier_proj_size
A__ : List[Any] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A__ : Tuple = apply_spec_augment
A__ : Tuple = mask_time_prob
A__ : Optional[int] = mask_time_length
A__ : Optional[Any] = mask_time_min_masks
A__ : List[Any] = mask_feature_prob
A__ : str = mask_feature_length
A__ : Dict = mask_feature_min_masks
A__ : Any = median_filter_width
super().__init__(
pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , is_encoder_decoder=snake_case , decoder_start_token_id=snake_case , suppress_tokens=snake_case , begin_suppress_tokens=snake_case , **snake_case , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
@property
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Any = OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
] )
if self.use_past:
A__ : Any = {0: """batch"""}
else:
A__ : List[Any] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(snake_case , direction="""inputs""" )
return common_inputs
def _UpperCamelCase ( self : List[str] , snake_case : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , snake_case : int = -1 , snake_case : int = -1 , snake_case : bool = False , snake_case : Optional["TensorType"] = None , snake_case : int = 2_2050 , snake_case : float = 5.0 , snake_case : int = 220 , ):
'''simple docstring'''
A__ : Union[str, Any] = OrderedDict()
A__ : Dict = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=snake_case , framework=snake_case , sampling_rate=snake_case , time_duration=snake_case , frequency=snake_case , )
A__ : Dict = encoder_inputs["""input_features"""].shape[2]
A__ : str = encoder_sequence_length // 2 if self.use_past else seq_length
A__ : Optional[Any] = super().generate_dummy_inputs(
preprocessor.tokenizer , snake_case , snake_case , snake_case , snake_case )
A__ : int = encoder_inputs.pop("""input_features""" )
A__ : Tuple = decoder_inputs.pop("""decoder_input_ids""" )
if "past_key_values" in decoder_inputs:
A__ : int = decoder_inputs.pop("""past_key_values""" )
return dummy_inputs
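# the dummy inputs pair audio features from the feature extractor with
# decoder ids from the tokenizer; with use_past the decoder dummy length is
# tied to half the encoder sequence length, and the past_key_values entry is
# pulled out of the decoder inputs so it can sit at the top level of the
# returned dict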
@property
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
return 1e-3
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , snake_case : str , snake_case : List[Any]=13 , snake_case : Union[str, Any]=7 , snake_case : Optional[Any]=True , snake_case : str=True , snake_case : Dict=False , snake_case : Union[str, Any]=True , snake_case : Optional[Any]=99 , snake_case : str=32 , snake_case : Tuple=5 , snake_case : List[str]=4 , snake_case : Optional[int]=37 , snake_case : str="gelu" , snake_case : Tuple=0.1 , snake_case : Optional[int]=0.1 , snake_case : int=512 , snake_case : List[str]=16 , snake_case : str=2 , snake_case : Optional[int]=0.02 , snake_case : str=3 , snake_case : Dict=4 , snake_case : Optional[Any]=None , ):
'''simple docstring'''
A__ : int = parent
A__ : Union[str, Any] = batch_size
A__ : Optional[int] = seq_length
A__ : List[Any] = is_training
A__ : List[str] = use_input_mask
A__ : Optional[Any] = use_token_type_ids
A__ : List[Any] = use_labels
A__ : Union[str, Any] = vocab_size
A__ : List[Any] = hidden_size
A__ : Any = num_hidden_layers
A__ : Any = num_attention_heads
A__ : Optional[int] = intermediate_size
A__ : Any = hidden_act
A__ : Tuple = hidden_dropout_prob
A__ : Dict = attention_probs_dropout_prob
A__ : Optional[int] = max_position_embeddings
A__ : Tuple = type_vocab_size
A__ : Union[str, Any] = type_sequence_label_size
A__ : List[str] = initializer_range
A__ : Any = num_labels
A__ : Any = num_choices
A__ : int = scope
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = None
if self.use_input_mask:
A__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Union[str, Any] = None
if self.use_token_type_ids:
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : int = None
A__ : int = None
A__ : List[str] = None
if self.use_labels:
A__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
A__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Optional[int] , snake_case : Dict , snake_case : Any , snake_case : Dict , snake_case : Any , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Optional[Any] ):
'''simple docstring'''
A__ : Optional[Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
A__ : List[Any] = model(snake_case , attention_mask=snake_case )
A__ : Dict = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Optional[int] , snake_case : Dict , snake_case : Optional[int] , snake_case : List[str] , snake_case : str , snake_case : Optional[Any] , snake_case : List[str] , snake_case : List[Any] , snake_case : Tuple , snake_case : Optional[Any] , ):
'''simple docstring'''
A__ : List[str] = BioGptForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Any , snake_case : str , snake_case : Tuple , snake_case : int , snake_case : Optional[Any] , snake_case : Any , *snake_case : Dict ):
'''simple docstring'''
A__ : Union[str, Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
# create attention mask
A__ : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
A__ : Any = self.seq_length // 2
A__ : str = 0
# first forward pass
A__ , A__ : List[Any] = model(snake_case , attention_mask=snake_case ).to_tuple()
# create hypothetical next token and extend to next_input_ids
A__ : int = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
A__ : List[str] = ids_tensor((1,) , snake_case ).item() + 1
A__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
A__ : int = random_other_next_tokens
# append to next input_ids and attn_mask
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : List[Any] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=snake_case )] , dim=1 , )
# get two different outputs
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Optional[int] = model(snake_case , past_key_values=snake_case , attention_mask=snake_case )["""last_hidden_state"""]
# select random slice
A__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
A__ : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : str , snake_case : int , snake_case : Optional[Any] , *snake_case : str ):
'''simple docstring'''
A__ : Dict = BioGptModel(config=snake_case ).to(snake_case ).eval()
A__ : Tuple = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
# first forward pass
A__ : Dict = model(snake_case , attention_mask=snake_case , use_cache=snake_case )
A__ , A__ : List[Any] = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
A__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : int = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attn_mask
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : Optional[int] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A__ : Any = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , past_key_values=snake_case )[
"""last_hidden_state"""
]
# select random slice
A__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
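# the check above asserts that decoding with cached past_key_values and only
# the new tokens reproduces (up to atol=1e-3) the hidden states of a full
# forward pass over the concatenated sequence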
def _UpperCamelCase ( self : Optional[Any] , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : Any , snake_case : Tuple , *snake_case : Union[str, Any] , snake_case : Union[str, Any]=False ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM(snake_case )
model.to(snake_case )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
A__ : Optional[Any] = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , *snake_case : Optional[int] ):
'''simple docstring'''
A__ : int = BioGptModel(snake_case )
A__ : Union[str, Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _UpperCamelCase ( self : Any , snake_case : Dict , snake_case : Tuple , snake_case : int , snake_case : Union[str, Any] , snake_case : Dict , *snake_case : Any ):
'''simple docstring'''
A__ : Union[str, Any] = self.num_labels
A__ : int = BioGptForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : List[str] = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ , A__ , A__ , A__ : str = config_and_inputs
A__ : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
snake_case_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
snake_case_ = (BioGptForCausalLM,) if is_torch_available() else ()
snake_case_ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : List[str] = BioGptModelTester(self )
A__ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ : str = type
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*snake_case )
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*snake_case , gradient_checkpointing=snake_case )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*snake_case )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*snake_case )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*snake_case )
@slow
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
A__ : Optional[int] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = """left"""
# Define PAD Token = EOS Token
A__ : Optional[int] = tokenizer.eos_token
A__ : Dict = model.config.eos_token_id
# use different length sentences to test batching
A__ : Union[str, Any] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
A__ : List[str] = tokenizer(snake_case , return_tensors="""pt""" , padding=snake_case )
A__ : str = inputs["""input_ids"""].to(snake_case )
A__ : Dict = model.generate(
input_ids=snake_case , attention_mask=inputs["""attention_mask"""].to(snake_case ) , )
A__ : Optional[int] = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Any = model.generate(input_ids=snake_case )
A__ : List[str] = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
A__ : str = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Dict = model.generate(input_ids=snake_case , max_length=model.config.max_length - num_paddings )
A__ : Optional[Any] = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
A__ : List[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case )
A__ : str = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case )
A__ : Optional[int] = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(snake_case , [non_padded_sentence, padded_sentence] )
@slow
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Optional[Any] = BioGptModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Optional[int] = 3
A__ : List[Any] = input_dict["""input_ids"""]
A__ : Dict = input_ids.ne(1 ).to(snake_case )
A__ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A__ : Union[str, Any] = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : int = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ , A__ : str = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Any = 3
A__ : List[Any] = """multi_label_classification"""
A__ : Dict = input_dict["""input_ids"""]
A__ : Tuple = input_ids.ne(1 ).to(snake_case )
A__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A__ : Tuple = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : List[str] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Optional[Any] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
A__ : str = torch.tensor([[2, 4805, 9, 656, 21]] )
A__ : Dict = model(snake_case )[0]
A__ : Tuple = 4_2384
A__ : str = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , snake_case )
A__ : str = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : Tuple = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
torch.manual_seed(0 )
A__ : Tuple = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(snake_case )
A__ : Optional[int] = model.generate(
**snake_case , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=snake_case , )
A__ : Optional[int] = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case )
A__ : List[str] = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(snake_case , snake_case )
"""simple docstring"""
import math
def _lowerCAmelCase ( UpperCAmelCase__ : list, UpperCAmelCase__ : int ) ->int:
A__ : Any = len(UpperCAmelCase__ )
A__ : Optional[int] = int(math.floor(math.sqrt(UpperCAmelCase__ ) ) )
A__ : int = 0
while arr[min(UpperCAmelCase__, UpperCAmelCase__ ) - 1] < x:
A__ : int = step
step += int(math.floor(math.sqrt(UpperCAmelCase__ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
A__ : Union[str, Any] = prev + 1
if prev == min(UpperCAmelCase__, UpperCAmelCase__ ):
return -1
if arr[prev] == x:
return prev
return -1
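# A worked check, assuming a sorted input list (a precondition of jump
# search): with 10 elements the block size is floor(sqrt(10)) = 3, so the
# search jumps 3 indices at a time before scanning linearly.
#
#   jump_search([0, 1, 4, 9, 16, 25, 36, 49, 64, 81], 49)  # -> 7
#   jump_search([0, 1, 4, 9, 16, 25, 36, 49, 64, 81], 55)  # -> -1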
if __name__ == "__main__":
A_ = input('''Enter numbers separated by a comma:\n''').strip()
A_ = [int(item) for item in user_input.split(''',''')]
A_ = int(input('''Enter the number to be searched:\n'''))
A_ = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(F'Number {x} is at index {res}')
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''spiece.model'''}
A_ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
A_ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
A_ = 0
A_ = 1
A_ = 2
A_ = 3
A_ = 4
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = 'left'
def __init__( self : Dict , snake_case : int , snake_case : List[Any]=False , snake_case : List[str]=True , snake_case : Dict=False , snake_case : Optional[Any]="<s>" , snake_case : List[str]="</s>" , snake_case : Tuple="<unk>" , snake_case : Tuple="<sep>" , snake_case : Union[str, Any]="<pad>" , snake_case : Dict="<cls>" , snake_case : Optional[Any]="<mask>" , snake_case : Optional[int]=["<eop>", "<eod>"] , snake_case : Optional[Dict[str, Any]] = None , **snake_case : Dict , ):
'''simple docstring'''
A__ : Optional[int] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
A__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case , remove_space=snake_case , keep_accents=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , additional_special_tokens=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
A__ : str = 3
A__ : str = do_lower_case
A__ : Optional[Any] = remove_space
A__ : List[Any] = keep_accents
A__ : Union[str, Any] = vocab_file
A__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case )
@property
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : int = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ):
'''simple docstring'''
A__ : int = self.__dict__.copy()
A__ : int = None
return state
def __setstate__( self : Tuple , snake_case : Union[str, Any] ):
'''simple docstring'''
A__ : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A__ : Optional[int] = {}
A__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self : List[str] , snake_case : Optional[Any] ):
'''simple docstring'''
if self.remove_space:
A__ : Optional[Any] = """ """.join(inputs.strip().split() )
else:
A__ : Dict = inputs
A__ : str = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
A__ : Any = unicodedata.normalize("""NFKD""" , snake_case )
A__ : Optional[int] = """""".join([c for c in outputs if not unicodedata.combining(snake_case )] )
if self.do_lower_case:
A__ : Any = outputs.lower()
return outputs
def _UpperCamelCase ( self : Union[str, Any] , snake_case : str ):
'''simple docstring'''
A__ : Dict = self.preprocess_text(snake_case )
A__ : Dict = self.sp_model.encode(snake_case , out_type=snake_case )
A__ : Optional[int] = []
for piece in pieces:
if len(snake_case ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
A__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A__ : int = cur_pieces[1:]
else:
A__ : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(snake_case )
else:
new_pieces.append(snake_case )
return new_pieces
def _UpperCamelCase ( self : List[str] , snake_case : Tuple ):
'''simple docstring'''
return self.sp_model.PieceToId(snake_case )
def _UpperCamelCase ( self : List[str] , snake_case : Any ):
'''simple docstring'''
return self.sp_model.IdToPiece(snake_case )
def _UpperCamelCase ( self : Optional[int] , snake_case : Any ):
'''simple docstring'''
A__ : Union[str, Any] = """""".join(snake_case ).replace(snake_case , """ """ ).strip()
return out_string
def _UpperCamelCase ( self : int , snake_case : List[int] , snake_case : bool = False , snake_case : bool = None , snake_case : bool = True , **snake_case : Union[str, Any] , ):
'''simple docstring'''
A__ : List[str] = kwargs.pop("""use_source_tokenizer""" , snake_case )
A__ : Any = self.convert_ids_to_tokens(snake_case , skip_special_tokens=snake_case )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A__ : Any = []
A__ : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case ) )
A__ : str = []
sub_texts.append(snake_case )
else:
current_sub_text.append(snake_case )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
A__ : Dict = """""".join(snake_case )
A__ : int = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A__ : Tuple = self.clean_up_tokenization(snake_case )
return clean_text
else:
return text
def _UpperCamelCase ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
'''simple docstring'''
A__ : Tuple = [self.sep_token_id]
A__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
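# Unlike BERT, XLNet appends its special tokens at the end:
#   single sequence:    X <sep> <cls>
#   pair of sequences:  A <sep> B <sep> <cls>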
def _UpperCamelCase ( self : Dict , snake_case : List[int] , snake_case : Optional[List[int]] = None , snake_case : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is not None:
return ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1, 1]
return ([0] * len(snake_case )) + [1, 1]
def _UpperCamelCase ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
'''simple docstring'''
A__ : Any = [self.sep_token_id]
A__ : int = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
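# Worked example for a single sequence of three ids: the sequence plus its
# <sep> get segment id 0 and the trailing <cls> gets the dedicated id 2,
# i.e. len([t0, t1, t2] + sep) * [0] + [2]  ->  [0, 0, 0, 0, 2]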
def _UpperCamelCase ( self : Optional[Any] , snake_case : str , snake_case : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A__ : List[Any] = os.path.join(
snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , """wb""" ) as fi:
A__ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : Dict ):
'''simple docstring'''
A__ : List[str] = [2, 1, 2, -1]
A__ : Tuple = [1, 2, 3, 4]
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : List[Any] = len(self.first_signal )
A__ : Optional[int] = len(self.second_signal )
A__ : Dict = max(snake_case , snake_case )
# create a zero matrix of max_length x max_length
A__ : Optional[int] = [[0] * max_length for i in range(snake_case )]
# fills the smaller signal with zeros to make both signals the same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(snake_case ):
A__ : List[Any] = deque(self.second_signal )
rotated_signal.rotate(snake_case )
for j, item in enumerate(snake_case ):
matrix[i][j] += item
# multiply the matrix with the first signal
A__ : List[Any] = np.matmul(np.transpose(snake_case ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(snake_case , 2 ) for i in final_signal]
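# A worked check with the default signals above: the circular convolution
# of [2, 1, 2, -1] and [1, 2, 3, 4] is
#   y[n] = sum_m x[m] * h[(n - m) mod 4]  ->  [10.0, 10.0, 6.0, 14.0]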
if __name__ == "__main__":
doctest.testmod()
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any] ) ->List[str]:
A__ : Union[str, Any] = DPTConfig()
if "large" in checkpoint_url:
A__ : int = 1_0_2_4
A__ : Union[str, Any] = 4_0_9_6
A__ : Optional[int] = 2_4
A__ : int = 1_6
A__ : Union[str, Any] = [5, 1_1, 1_7, 2_3]
A__ : Tuple = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4]
A__ : Tuple = (1, 3_8_4, 3_8_4)
if "ade" in checkpoint_url:
A__ : Optional[int] = True
A__ : int = 1_5_0
A__ : Union[str, Any] = """huggingface/label-files"""
A__ : List[Any] = """ade20k-id2label.json"""
A__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(UpperCAmelCase__, UpperCAmelCase__, repo_type="""dataset""" ) ), """r""" ) )
A__ : List[Any] = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
A__ : Dict = idalabel
A__ : List[Any] = {v: k for k, v in idalabel.items()}
A__ : Optional[Any] = [1, 1_5_0, 4_8_0, 4_8_0]
return config, expected_shape
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->Any:
A__ : List[Any] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__, UpperCAmelCase__ )
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any] ) ->List[str]:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
A__ : str = name.replace("""pretrained.model""", """dpt.encoder""" )
if "pretrained.model" in name:
A__ : Dict = name.replace("""pretrained.model""", """dpt.embeddings""" )
if "patch_embed" in name:
A__ : List[Any] = name.replace("""patch_embed""", """patch_embeddings""" )
if "pos_embed" in name:
A__ : int = name.replace("""pos_embed""", """position_embeddings""" )
if "attn.proj" in name:
A__ : Tuple = name.replace("""attn.proj""", """attention.output.dense""" )
if "proj" in name and "project" not in name:
A__ : List[Any] = name.replace("""proj""", """projection""" )
if "blocks" in name:
A__ : Optional[Any] = name.replace("""blocks""", """layer""" )
if "mlp.fc1" in name:
A__ : int = name.replace("""mlp.fc1""", """intermediate.dense""" )
if "mlp.fc2" in name:
A__ : List[str] = name.replace("""mlp.fc2""", """output.dense""" )
if "norm1" in name:
A__ : Any = name.replace("""norm1""", """layernorm_before""" )
if "norm2" in name:
A__ : List[str] = name.replace("""norm2""", """layernorm_after""" )
if "scratch.output_conv" in name:
A__ : Optional[int] = name.replace("""scratch.output_conv""", """head""" )
if "scratch" in name:
A__ : List[str] = name.replace("""scratch""", """neck""" )
if "layer1_rn" in name:
A__ : List[str] = name.replace("""layer1_rn""", """convs.0""" )
if "layer2_rn" in name:
A__ : Optional[int] = name.replace("""layer2_rn""", """convs.1""" )
if "layer3_rn" in name:
A__ : Any = name.replace("""layer3_rn""", """convs.2""" )
if "layer4_rn" in name:
A__ : Any = name.replace("""layer4_rn""", """convs.3""" )
if "refinenet" in name:
A__ : Union[str, Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
A__ : str = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx-4 )}' )
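# e.g. abs(1 - 4) = 3 and abs(4 - 4) = 0, so "refinenet1" becomes
# "fusion_stage.layers.3" and "refinenet4" becomes "fusion_stage.layers.0"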
if "out_conv" in name:
A__ : Optional[Any] = name.replace("""out_conv""", """projection""" )
if "resConfUnit1" in name:
A__ : List[Any] = name.replace("""resConfUnit1""", """residual_layer1""" )
if "resConfUnit2" in name:
A__ : Tuple = name.replace("""resConfUnit2""", """residual_layer2""" )
if "conv1" in name:
A__ : Tuple = name.replace("""conv1""", """convolution1""" )
if "conv2" in name:
A__ : List[Any] = name.replace("""conv2""", """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
A__ : Union[str, Any] = name.replace("""pretrained.act_postprocess1.0.project.0""", """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
A__ : Tuple = name.replace("""pretrained.act_postprocess2.0.project.0""", """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
A__ : Optional[Any] = name.replace("""pretrained.act_postprocess3.0.project.0""", """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
A__ : Optional[Any] = name.replace("""pretrained.act_postprocess4.0.project.0""", """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
A__ : Any = name.replace("""pretrained.act_postprocess1.3""", """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
A__ : List[Any] = name.replace("""pretrained.act_postprocess1.4""", """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
A__ : Dict = name.replace("""pretrained.act_postprocess2.3""", """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
A__ : Optional[Any] = name.replace("""pretrained.act_postprocess2.4""", """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
A__ : Union[str, Any] = name.replace("""pretrained.act_postprocess3.3""", """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
A__ : Optional[int] = name.replace("""pretrained.act_postprocess4.3""", """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
A__ : Dict = name.replace("""pretrained.act_postprocess4.4""", """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
A__ : Union[str, Any] = name.replace("""pretrained""", """dpt""" )
if "bn" in name:
A__ : Union[str, Any] = name.replace("""bn""", """batch_norm""" )
if "head" in name:
A__ : Dict = name.replace("""head""", """head.head""" )
if "encoder.norm" in name:
A__ : Optional[int] = name.replace("""encoder.norm""", """layernorm""" )
if "auxlayer" in name:
A__ : List[str] = name.replace("""auxlayer""", """auxiliary_head.head""" )
return name
def _lowerCAmelCase ( UpperCAmelCase__ : int, UpperCAmelCase__ : Dict ) ->str:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
A__ : Tuple = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A__ : List[str] = in_proj_weight[: config.hidden_size, :]
A__ : int = in_proj_bias[: config.hidden_size]
A__ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : str = in_proj_weight[
-config.hidden_size :, :
]
A__ : Optional[Any] = in_proj_bias[-config.hidden_size :]
def _lowerCAmelCase ( ) ->List[str]:
A__ : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : int = Image.open(requests.get(UpperCAmelCase__, stream=UpperCAmelCase__ ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( UpperCAmelCase__ : int, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : str, UpperCAmelCase__ : int ) ->str:
A__ , A__ : Dict = get_dpt_config(UpperCAmelCase__ )
# load original state_dict from URL
A__ : Any = torch.hub.load_state_dict_from_url(UpperCAmelCase__, map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(UpperCAmelCase__ )
# rename keys
for key in state_dict.copy().keys():
A__ : int = state_dict.pop(UpperCAmelCase__ )
A__ : str = val
# read in qkv matrices
read_in_q_k_v(UpperCAmelCase__, UpperCAmelCase__ )
# load HuggingFace model
A__ : Optional[Any] = DPTForSemanticSegmentation(UpperCAmelCase__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(UpperCAmelCase__ )
model.load_state_dict(UpperCAmelCase__ )
model.eval()
# Check outputs on an image
A__ : Optional[Any] = 4_8_0 if """ade""" in checkpoint_url else 3_8_4
A__ : Dict = DPTImageProcessor(size=UpperCAmelCase__ )
A__ : Optional[int] = prepare_img()
A__ : Any = image_processor(UpperCAmelCase__, return_tensors="""pt""" )
# forward pass
A__ : List[str] = model(**UpperCAmelCase__ ).logits if """ade""" in checkpoint_url else model(**UpperCAmelCase__ ).predicted_depth
# Assert logits
A__ : Optional[Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
if "ade" in checkpoint_url:
A__ : Optional[int] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
assert outputs.shape == torch.Size(UpperCAmelCase__ )
assert (
torch.allclose(outputs[0, 0, :3, :3], UpperCAmelCase__, atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3], UpperCAmelCase__ )
)
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCAmelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCAmelCase__ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase__, UpperCAmelCase__ ), organization="""nielsr""", commit_message="""Add model""", use_temp_dir=UpperCAmelCase__, )
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase__, UpperCAmelCase__ ), organization="""nielsr""", commit_message="""Add image processor""", use_temp_dir=UpperCAmelCase__, )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
A_ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
"""simple docstring"""
def _lowerCAmelCase ( UpperCAmelCase__ : str ) ->int:
stooge(UpperCAmelCase__, 0, len(UpperCAmelCase__ ) - 1 )
return arr
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Dict ) ->List[str]:
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
A__ , A__ : str = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
A__ : Tuple = int((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(UpperCAmelCase__, UpperCAmelCase__, (h - t) )
# Recursively sort last 2/3 elements
stooge(UpperCAmelCase__, i + t, (UpperCAmelCase__) )
# Recursively sort first 2/3 elements
stooge(UpperCAmelCase__, UpperCAmelCase__, (h - t) )
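# A small worked example, assuming the public name `stooge_sort` used in the
# __main__ block below: endpoints are swapped when out of order, then the
# first 2/3, the last 2/3, and the first 2/3 again are sorted recursively.
#
#   stooge_sort([2, 4, 5, 3, 1])  # -> [1, 2, 3, 4, 5]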
if __name__ == "__main__":
A_ = input('''Enter numbers separated by a comma:\n''').strip()
A_ = [int(item) for item in user_input.split(''',''')]
print(stooge_sort(unsorted))
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
A_ = '''src/diffusers'''
A_ = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
A_ = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
A_ = spec.loader.load_module()
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Optional[Any] ) ->Any:
return line.startswith(UpperCAmelCase__ ) or len(UpperCAmelCase__ ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""", UpperCAmelCase__ ) is not None
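# the regex above matches lines that only close a multi-line signature,
# e.g. ")", "):" or ") -> torch.Tensor:", so such lines are treated as part
# of the block being collected rather than as its end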
def _lowerCAmelCase ( UpperCAmelCase__ : List[str] ) ->Union[str, Any]:
A__ : Any = object_name.split(""".""" )
A__ : int = 0
# First let's find the module where our object lives.
A__ : str = parts[i]
while i < len(UpperCAmelCase__ ) and not os.path.isfile(os.path.join(UpperCAmelCase__, f'{module}.py' ) ):
i += 1
if i < len(UpperCAmelCase__ ):
A__ : Union[str, Any] = os.path.join(UpperCAmelCase__, parts[i] )
if i >= len(UpperCAmelCase__ ):
raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(UpperCAmelCase__, f'{module}.py' ), """r""", encoding="""utf-8""", newline="""\n""" ) as f:
A__ : List[Any] = f.readlines()
# Now let's find the class / func in the code!
A__ : Optional[Any] = """"""
A__ : Any = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCAmelCase__ ) and re.search(Rf'^{indent}(class|def)\s+{name}(\(|\:)', lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCAmelCase__ ):
raise ValueError(f' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
A__ : List[Any] = line_index
while line_index < len(UpperCAmelCase__ ) and _should_continue(lines[line_index], UpperCAmelCase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A__ : List[Any] = lines[start_index:line_index]
return "".join(UpperCAmelCase__ )
A_ = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
A_ = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
A_ = re.compile(r'''<FILL\s+[^>]*>''')
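# Together, these patterns recognize comments of the (hypothetical) form
#   # Copied from diffusers.models.attention.BasicTransformerBlock with Basic->Flax
# where each optional "X->Y" suffix is a rename applied to the original code
# before it is compared against the copy below the comment.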
def _lowerCAmelCase ( UpperCAmelCase__ : List[str] ) ->Optional[Any]:
A__ : Dict = code.split("""\n""" )
A__ : List[Any] = 0
while idx < len(UpperCAmelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCAmelCase__ ):
return re.search(R"""^(\s*)\S""", lines[idx] ).groups()[0]
return ""
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[Any] ) ->int:
A__ : str = len(get_indent(UpperCAmelCase__ ) ) > 0
if has_indent:
A__ : Union[str, Any] = f'class Bla:\n{code}'
A__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=1_1_9, preview=UpperCAmelCase__ )
A__ : Tuple = black.format_str(UpperCAmelCase__, mode=UpperCAmelCase__ )
A__ , A__ : List[Any] = style_docstrings_in_code(UpperCAmelCase__ )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def _lowerCAmelCase ( UpperCAmelCase__ : Any, UpperCAmelCase__ : Dict=False ) ->List[Any]:
with open(UpperCAmelCase__, """r""", encoding="""utf-8""", newline="""\n""" ) as f:
A__ : int = f.readlines()
A__ : Dict = []
A__ : List[str] = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCAmelCase__ ):
A__ : Dict = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
A__ , A__ , A__ : Dict = search.groups()
A__ : Tuple = find_code_in_diffusers(UpperCAmelCase__ )
A__ : int = get_indent(UpperCAmelCase__ )
A__ : List[str] = line_index + 1 if indent == theoretical_indent else line_index + 2
A__ : Tuple = theoretical_indent
A__ : Optional[Any] = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see an `# End copy` comment.
A__ : Tuple = True
while line_index < len(UpperCAmelCase__ ) and should_continue:
line_index += 1
if line_index >= len(UpperCAmelCase__ ):
break
A__ : Optional[int] = lines[line_index]
A__ : Tuple = _should_continue(UpperCAmelCase__, UpperCAmelCase__ ) and re.search(f'^{indent}# End copy', UpperCAmelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A__ : Dict = lines[start_index:line_index]
A__ : Tuple = """""".join(UpperCAmelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
A__ : Optional[int] = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(UpperCAmelCase__ ) is None]
A__ : Optional[Any] = """\n""".join(UpperCAmelCase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(UpperCAmelCase__ ) > 0:
A__ : int = replace_pattern.replace("""with""", """""" ).split(""",""" )
A__ : List[Any] = [_re_replace_pattern.search(UpperCAmelCase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
A__ , A__ , A__ : Union[str, Any] = pattern.groups()
A__ : Union[str, Any] = re.sub(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
if option.strip() == "all-casing":
A__ : List[Any] = re.sub(obja.lower(), obja.lower(), UpperCAmelCase__ )
A__ : Tuple = re.sub(obja.upper(), obja.upper(), UpperCAmelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
A__ : Optional[int] = blackify(lines[start_index - 1] + theoretical_code )
A__ : List[Any] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
A__ : List[Any] = lines[:start_index] + [theoretical_code] + lines[line_index:]
A__ : Tuple = start_index + 1
if overwrite and len(UpperCAmelCase__ ) > 0:
# Warn the user a file has been modified.
print(f'Detected changes, rewriting {filename}.' )
with open(UpperCAmelCase__, """w""", encoding="""utf-8""", newline="""\n""" ) as f:
f.writelines(UpperCAmelCase__ )
return diffs
def check_copies(overwrite: bool = False):
    # DIFFUSERS_PATH is the repo-root constant defined near the top of this script (not shown in this excerpt).
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
A_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
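
# For illustration, a minimal sketch of the convention this script enforces. All
# names below are hypothetical, not taken from the diffusers codebase: the body of
# `step` must stay identical to the referenced original after applying the
# `FirstScheduler->SecondScheduler` rename, or the file is reported (and, with
# --fix_and_overwrite, rewritten in place).
class SecondScheduler:
    # Copied from diffusers.schedulers.scheduling_first.FirstScheduler.step with FirstScheduler->SecondScheduler
    def step(self, sample):
        return sample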
| 296
| 1
|
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """
    Visual Question Answering pipeline using a `AutoModelForVisualQuestionAnswering` head.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the input is already batched (e.g. a list of {"image", "question"} dicts).
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 296
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
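
# To see what the `_LazyModule` indirection buys, a small sketch: importing the
# package is cheap, and heavy submodules load only on first attribute access.
# Neither torch nor sentencepiece is imported until the corresponding name is touched.
import transformers.models.llama as llama

config_cls = llama.LlamaConfig      # imports configuration_llama only
model_cls = llama.LlamaForCausalLM  # now modeling_llama (and torch) get imported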
| 296
| 1
|
"""simple docstring"""
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal at the point of incidence
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """
    Return the number of times the beam hits the internal surface of the
    ellipse 4x^2 + y^2 = 100 before exiting through the gap at the top.
    """
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(F'{solution() = }')
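
# As a quick check of the geometry above, a hedged sketch of the first bounce
# (values shown are approximate). The beam enters at (0.0, 10.1) heading to
# (1.4, -9.6), so the first incoming gradient is (10.1 - (-9.6)) / (0.0 - 1.4);
# `next_point` reflects it off the ellipse and returns the next impact point.
x, y, m = next_point(1.4, -9.6, (10.1 - (-9.6)) / (0.0 - 1.4))
print(x, y, m)  # approximately the second impact point on the ellipse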
| 296
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
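
# A toy sketch of `set_partitions` on a hand-built parameter tree. The keys are
# hypothetical but follow the GPT-J-style naming the rules above expect; real usage
# passes `model.params` from a Flax model. Each leaf is replaced by its matching
# PartitionSpec (or None); the assert fires if any leaf matches no rule.
toy_params = {
    "transformer": {
        "wte": {"embedding": 0},
        "ln_f": {"bias": 0, "scale": 0},
    }
}
spec = set_partitions(toy_params)
# e.g. spec["transformer"]["wte"]["embedding"] == P("mp", None)
print(spec)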
| 296
| 1
|
"""simple docstring"""
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
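
# A quick worked example of the ideal-gas helpers above (PV = nRT with
# R ≈ 0.0821 L·atm/(mol·K)). 2 moles at 300 K in a 10 L vessel:
# P = nRT / V = 2 * 0.0821 * 300 / 10 ≈ 4.93 atm, which rounds to 5.
print(moles_to_pressure(volume=10, moles=2, temperature=300))  # 5
print(moles_to_volume(pressure=5, moles=2, temperature=300))   # 10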
| 296
|
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 296
| 1
|
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
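
# A small usage sketch of the helpers above; the first environment variable name
# is illustrative, not a real accelerate variable.
import os

os.environ["MY_DEBUG_FLAG"] = "1"
print(parse_flag_from_env("MY_DEBUG_FLAG"))                 # True
print(get_int_from_env(["MY_NUM_PROCS", "WORLD_SIZE"], 1))  # 1 unless one of the keys is set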
| 296
|
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            predictions, references, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 296
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
A_ = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" BigBird tokenizer (backed by HuggingFace's *tokenizers* library), based on Unigram.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 296
|
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
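
# Since the class above is only a deprecation shim, migration is mechanical; a hedged sketch:
# Before (deprecated, emits the FutureWarning above):
#     trainer = SageMakerTrainer(args=training_args, model=model, train_dataset=train_ds)
# After (drop-in replacement, identical behavior):
#     trainer = Trainer(args=training_args, model=model, train_dataset=train_ds)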
| 296
| 1
|
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 296
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a list of lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4000, return_attention_mask=False, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 296
| 1
|
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks).
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor):
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self):
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """
    Construct Patch Embeddings.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """
    Group Normalization with 1 group. Input: tensor in shape [B, C, H, W].
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None
        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
POOLFORMER_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = PoolFormerEncoder(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Optional[torch.FloatTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
'\n PoolFormer Model transformer with an image classification head on top\n ' , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
def __init__( self : Dict , snake_case : Dict ):
'''simple docstring'''
super().__init__(snake_case )
A__ : List[str] = config.num_labels
A__ : Optional[int] = PoolFormerModel(snake_case )
# Final norm
A__ : Any = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
A__ : Any = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _UpperCamelCase ( self : List[Any] , snake_case : Optional[torch.FloatTensor] = None , snake_case : Optional[torch.LongTensor] = None , snake_case : Optional[bool] = None , snake_case : Optional[bool] = None , ):
'''simple docstring'''
A__ : int = return_dict if return_dict is not None else self.config.use_return_dict
A__ : Any = self.poolformer(
snake_case , output_hidden_states=snake_case , return_dict=snake_case , )
A__ : List[str] = outputs[0]
A__ : Dict = self.classifier(self.norm(snake_case ).mean([-2, -1] ) )
A__ : List[str] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A__ : str = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A__ : Tuple = """single_label_classification"""
else:
A__ : int = """multi_label_classification"""
if self.config.problem_type == "regression":
A__ : Any = MSELoss()
if self.num_labels == 1:
A__ : str = loss_fct(logits.squeeze() , labels.squeeze() )
else:
A__ : str = loss_fct(snake_case , snake_case )
elif self.config.problem_type == "single_label_classification":
A__ : Optional[Any] = CrossEntropyLoss()
A__ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A__ : Union[str, Any] = BCEWithLogitsLoss()
A__ : Tuple = loss_fct(snake_case , snake_case )
if not return_dict:
A__ : Union[str, Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states )
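# Note (added): the loss dispatch above follows the standard transformers
# `problem_type` convention: one label -> regression (MSELoss); integer labels
# -> single-label classification (CrossEntropyLoss); anything else ->
# multi-label classification (BCEWithLogitsLoss). The dtype check behind the
# second branch, in isolation:
#
#   import torch
#   labels = torch.tensor([2, 0, 1])
#   labels.dtype in (torch.long, torch.int)  # True -> "single_label_classification"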
| 296
|
"""simple docstring"""
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[str] ):
'''simple docstring'''
A__ : Optional[int] = (0, 0)
A__ : Dict = None
A__ : int = 0
A__ : str = 0
A__ : Optional[Any] = 0
def __eq__( self : str , snake_case : Optional[int] ):
'''simple docstring'''
return self.position == cell.position
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
print(self.position )
class __SCREAMING_SNAKE_CASE :
def __init__( self : int , snake_case : Any=(5, 5) ):
'''simple docstring'''
A__ : Optional[int] = np.zeros(snake_case )
A__ : List[Any] = world_size[0]
A__ : Dict = world_size[1]
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
print(self.w )
def _UpperCamelCase ( self : Optional[int] , snake_case : List[Any] ):
'''simple docstring'''
A__ : int = [  # neighbour_cords: offsets of the 8 surrounding cells
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
A__ : int = cell.position[0]
A__ : str = cell.position[1]
A__ : Any = []
for n in neighbour_cords:
A__ : List[Any] = current_x + n[0]
A__ : Tuple = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
A__ : List[Any] = Cell()
A__ : str = (x, y)
A__ : Optional[Any] = cell
neighbours.append(snake_case )
return neighbours
def _lowerCAmelCase ( UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict ) ->Dict:
A__ : Union[str, Any] = []
A__ : Optional[int] = []
_open.append(UpperCAmelCase__ )
while _open:
A__ : List[Any] = np.argmin([n.f for n in _open] )
A__ : Union[str, Any] = _open[min_f]
_closed.append(_open.pop(UpperCAmelCase__ ) )
if current == goal:
break
for n in world.get_neighbours(UpperCAmelCase__ ):
    # skip neighbours that have already been expanded
    if any(c == n for c in _closed ):
        continue
    A__ : Dict = current.g + 1
    A__ , A__ : int = n.position  # neighbour coordinates (xa, ya)
    A__ , A__ : Optional[int] = goal.position  # goal coordinates (xb, yb)
    # heuristic: squared Euclidean distance from the neighbour to the goal
    A__ : Union[str, Any] = (yb - ya) ** 2 + (xb - xa) ** 2
    A__ : Optional[int] = n.h + n.g
    # keep the existing open-list entry if it already reaches n at no higher cost
    if any(c == n and c.f <= n.f for c in _open ):
        continue
    _open.append(UpperCAmelCase__ )
A__ : List[str] = []
while current.parent is not None:
path.append(current.position )
A__ : Union[str, Any] = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
A_ = Gridworld()
# Start position and goal
A_ = Cell()
A_ = (0, 0)
A_ = Cell()
A_ = (4, 4)
print(F'path from {start.position} to {goal.position}')
A_ = astar(world, start, goal)
# Just for visual reasons.
for i in s:
A_ = 1
print(world.w)
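# Illustrative sketch (added): the heuristic used in `astar` above is the
# squared Euclidean distance to the goal. A standalone, runnable version:
def squared_euclidean(a: tuple, b: tuple) -> int:
    """Squared straight-line distance; order-preserving, so no sqrt is needed."""
    return (b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2

assert squared_euclidean((0, 0), (4, 4)) == 32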
| 296
| 1
|
"""simple docstring"""
import os
import sys
import unittest
A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A_ = os.path.join(git_repo_path, '''src''', '''transformers''')
A_ = '''
{0} = None
'''
A_ = '''
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
'''
A_ = '''
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
'''
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : str = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
self.assertIsNone(snake_case )
A__ : Optional[Any] = find_backend(""" if not is_tokenizers_available():""" )
self.assertEqual(snake_case , """tokenizers""" )
A__ : Dict = find_backend(""" if not is_tensorflow_text_available():""" )
self.assertEqual(snake_case , """tensorflow_text""" )
A__ : Optional[Any] = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
self.assertEqual(snake_case , """sentencepiece_and_tokenizers""" )
A__ : int = find_backend(
""" if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
self.assertEqual(snake_case , """sentencepiece_and_tensorflow_text""" )
A__ : List[str] = find_backend(
""" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
self.assertEqual(snake_case , """sentencepiece_and_tokenizers_and_vision""" )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Optional[int] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""" , snake_case )
self.assertIn("""tensorflow_text""" , snake_case )
self.assertIn("""sentencepiece_and_tokenizers""" , snake_case )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertModel""" , objects["""tf"""] )
self.assertIn("""FlaxBertModel""" , objects["""flax"""] )
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] )
self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : int = create_dummy_object("""CONSTANT""" , """'torch'""" )
self.assertEqual(snake_case , """\nCONSTANT = None\n""" )
A__ : List[Any] = create_dummy_object("""function""" , """'torch'""" )
self.assertEqual(
snake_case , """\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n""" )
A__ : Union[str, Any] = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
A__ : Tuple = create_dummy_object("""FakeClass""" , """'torch'""" )
self.assertEqual(snake_case , snake_case )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : Optional[Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, [\"torch\"])


class FakeClass(metaclass=DummyObject):
    _backends = [\"torch\"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, [\"torch\"])
"""
A__ : Union[str, Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] , snake_case )
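# Illustrative sketch (added; not the real utility): the tests above expect
# `find_backend` to extract backend names from guard lines of the form
# `if not (is_x_available() and is_y_available()):` and join them with "_and_".
import re

def find_backend_sketch(line):
    if "if not" not in line:
        return None
    backends = re.findall(r"is_(\w+)_available\(\)", line)
    return "_and_".join(backends) if backends else None

# find_backend_sketch("    if not is_tokenizers_available():")  # -> "tokenizers"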
| 296
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple=False ) ->str:
A__ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
A__ : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Any]=False ) ->str:
for i in range(config.num_hidden_layers ):
if base_model:
A__ : Any = """"""
else:
A__ : Tuple = """deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : Any = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
A__ : Tuple = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A__ : List[Any] = in_proj_weight[
: config.hidden_size, :
]
A__ : str = in_proj_bias[: config.hidden_size]
A__ : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
A__ : Any = in_proj_bias[-config.hidden_size :]
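# Illustrative sketch (added): timm stores the attention projections as one
# fused (3 * hidden, hidden) matrix; the function above slices it into equal
# thirds for query, key and value, exactly as this toy example does:
import torch

hidden = 4  # toy size for demonstration
fused = torch.randn(3 * hidden, hidden)
q, k, v = fused[:hidden], fused[hidden : 2 * hidden], fused[-hidden:]
assert q.shape == k.shape == v.shape == (hidden, hidden)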
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Union[str, Any] ) ->Any:
A__ : int = dct.pop(UpperCAmelCase__ )
A__ : Tuple = val
def _lowerCAmelCase ( ) ->List[Any]:
A__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : int = Image.open(requests.get(UpperCAmelCase__, stream=UpperCAmelCase__ ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any ) ->Tuple:
A__ : List[Any] = DeiTConfig()
# all deit models have fine-tuned heads
A__ : Tuple = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
A__ : str = 1_0_0_0
A__ : List[str] = """huggingface/label-files"""
A__ : Dict = """imagenet-1k-id2label.json"""
A__ : List[str] = json.load(open(hf_hub_download(UpperCAmelCase__, UpperCAmelCase__, repo_type="""dataset""" ), """r""" ) )
A__ : Dict = {int(k ): v for k, v in idalabel.items()}
A__ : Optional[int] = idalabel
A__ : Dict = {v: k for k, v in idalabel.items()}
A__ : List[str] = int(deit_name[-6:-4] )
A__ : str = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("""tiny""" ):
A__ : List[str] = 1_9_2
A__ : int = 7_6_8
A__ : List[Any] = 1_2
A__ : Dict = 3
elif deit_name[9:].startswith("""small""" ):
A__ : List[Any] = 3_8_4
A__ : List[str] = 1_5_3_6
A__ : Any = 1_2
A__ : Union[str, Any] = 6
if deit_name[9:].startswith("""base""" ):
pass
elif deit_name[4:].startswith("""large""" ):
A__ : int = 1_0_2_4
A__ : str = 4_0_9_6
A__ : Any = 2_4
A__ : int = 1_6
# load original model from timm
A__ : Dict = timm.create_model(UpperCAmelCase__, pretrained=UpperCAmelCase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A__ : Tuple = timm_model.state_dict()
A__ : str = create_rename_keys(UpperCAmelCase__, UpperCAmelCase__ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
read_in_q_k_v(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# load HuggingFace model
A__ : str = DeiTForImageClassificationWithTeacher(UpperCAmelCase__ ).eval()
model.load_state_dict(UpperCAmelCase__ )
# Check outputs on an image, prepared by DeiTImageProcessor
A__ : int = int(
(2_5_6 / 2_2_4) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
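# (added) e.g. with the default image_size of 224: int(256 / 224 * 224) == 256,
# i.e. resize the shorter edge to 256, then center-crop back to 224, mirroring
# the original DeiT evaluation transform.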
A__ : Any = DeiTImageProcessor(size=UpperCAmelCase__, crop_size=config.image_size )
A__ : Union[str, Any] = image_processor(images=prepare_img(), return_tensors="""pt""" )
A__ : Optional[Any] = encoding["""pixel_values"""]
A__ : Union[str, Any] = model(UpperCAmelCase__ )
A__ : Union[str, Any] = timm_model(UpperCAmelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCAmelCase__, outputs.logits, atol=1e-3 )
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
print(f'Saving model {deit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCAmelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
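# Usage sketch (added; the script filename is an assumption):
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224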
| 296
| 1
|
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( ) ->Union[str, Any]:
A__ : Union[str, Any] = 1_0
A__ : str = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
A__ : List[str] = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
"""id""": list(range(UpperCAmelCase__ ) ),
}, features=UpperCAmelCase__, )
return dataset
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Optional[Any] ) ->List[str]:
A__ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=UpperCAmelCase__ )
return filename
# FILE_CONTENT + files
A_ = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any] ) ->Optional[Any]:
A__ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
A__ : str = FILE_CONTENT
with open(UpperCAmelCase__, """w""" ) as f:
f.write(UpperCAmelCase__ )
return filename
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int] ) ->int:
import bz2
A__ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
A__ : Optional[Any] = bytes(UpperCAmelCase__, """utf-8""" )
with bz2.open(UpperCAmelCase__, """wb""" ) as f:
f.write(UpperCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : str ) ->Optional[Any]:
import gzip
A__ : str = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
A__ : List[str] = bytes(UpperCAmelCase__, """utf-8""" )
with gzip.open(UpperCAmelCase__, """wb""" ) as f:
f.write(UpperCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any] ) ->Optional[Any]:
if datasets.config.LZ4_AVAILABLE:
import lz4.frame
A__ : int = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
A__ : int = bytes(UpperCAmelCase__, """utf-8""" )
with lz4.frame.open(UpperCAmelCase__, """wb""" ) as f:
f.write(UpperCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : List[str] ) ->Optional[Any]:
if datasets.config.PY7ZR_AVAILABLE:
import py7zr
A__ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with py7zr.SevenZipFile(UpperCAmelCase__, """w""" ) as archive:
archive.write(UpperCAmelCase__, arcname=os.path.basename(UpperCAmelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Any, UpperCAmelCase__ : int ) ->Union[str, Any]:
import tarfile
A__ : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(UpperCAmelCase__, """w""" ) as f:
f.add(UpperCAmelCase__, arcname=os.path.basename(UpperCAmelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int] ) ->Union[str, Any]:
import lzma
A__ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
A__ : str = bytes(UpperCAmelCase__, """utf-8""" )
with lzma.open(UpperCAmelCase__, """wb""" ) as f:
f.write(UpperCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Any ) ->str:
import zipfile
A__ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(UpperCAmelCase__, """w""" ) as f:
f.write(UpperCAmelCase__, arcname=os.path.basename(UpperCAmelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int] ) ->Tuple:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
A__ : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
A__ : Any = bytes(UpperCAmelCase__, """utf-8""" )
with zstd.open(UpperCAmelCase__, """wb""" ) as f:
f.write(UpperCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int] ) ->str:
A__ : int = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
A__ : Any = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(UpperCAmelCase__, """w""" ) as f:
f.write(UpperCAmelCase__ )
return filename
A_ = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
A_ = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
A_ = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
A_ = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
A_ = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( ) ->Dict:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Dict ) ->str:
A__ : Union[str, Any] = datasets.Dataset.from_dict(UpperCAmelCase__ )
A__ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=UpperCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : List[str] ) ->Any:
A__ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlite3.connect(UpperCAmelCase__ ) ) as con:
A__ : int = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""", tuple(item.values() ) )
con.commit()
return path
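# (added) Reading the fixture back, for illustration:
#   with contextlib.closing(sqlite3.connect(path)) as con:
#       rows = con.execute("SELECT col_1, col_2, col_3 FROM dataset").fetchall()
#   # rows == [("0", 0, 0.0), ("1", 1, 1.0), ("2", 2, 2.0), ("3", 3, 3.0)]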
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any] ) ->Dict:
A__ : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(UpperCAmelCase__, """w""", newline="""""" ) as f:
A__ : Optional[Any] = csv.DictWriter(UpperCAmelCase__, fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Any ) ->List[Any]:
A__ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(UpperCAmelCase__, """w""", newline="""""" ) as f:
A__ : List[str] = csv.DictWriter(UpperCAmelCase__, fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Tuple ) ->Tuple:
import bz2
A__ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(UpperCAmelCase__, """rb""" ) as f:
A__ : Dict = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(UpperCAmelCase__, """wb""" ) as f:
f.write(UpperCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : int, UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any] ) ->List[Any]:
A__ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(UpperCAmelCase__, """w""" ) as f:
f.write(UpperCAmelCase__, arcname=os.path.basename(UpperCAmelCase__ ) )
f.write(UpperCAmelCase__, arcname=os.path.basename(UpperCAmelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : int, UpperCAmelCase__ : List[str], UpperCAmelCase__ : int ) ->Union[str, Any]:
A__ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(UpperCAmelCase__, """w""" ) as f:
f.write(UpperCAmelCase__, arcname=os.path.basename(csv_path.replace(""".csv""", """.CSV""" ) ) )
f.write(UpperCAmelCase__, arcname=os.path.basename(csva_path.replace(""".csv""", """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Any ) ->List[Any]:
A__ : str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(UpperCAmelCase__, """w""" ) as f:
f.write(UpperCAmelCase__, arcname=os.path.join("""main_dir""", os.path.basename(UpperCAmelCase__ ) ) )
f.write(UpperCAmelCase__, arcname=os.path.join("""main_dir""", os.path.basename(UpperCAmelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any] ) ->Union[str, Any]:
A__ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
A__ : Optional[int] = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(UpperCAmelCase__, """wb""" ) as f:
A__ : Optional[int] = pq.ParquetWriter(UpperCAmelCase__, schema=UpperCAmelCase__ )
A__ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]}, schema=UpperCAmelCase__ )
writer.write_table(UpperCAmelCase__ )
writer.close()
return path
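# (added) For illustration, the file written above round-trips with
#   pq.read_table(path).to_pydict()
# which returns the same {"col_1": [...], "col_2": [...], "col_3": [...]} columns as DATA.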
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Any ) ->Optional[Any]:
A__ : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
A__ : Optional[int] = {"""data""": DATA}
with open(UpperCAmelCase__, """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Any ) ->Any:
A__ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
A__ : Tuple = {"""data""": DATA_DICT_OF_LISTS}
with open(UpperCAmelCase__, """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any] ) ->Any:
A__ : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(UpperCAmelCase__, """w""" ) as f:
for item in DATA:
f.write(json.dumps(UpperCAmelCase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->List[str]:
A__ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(UpperCAmelCase__, """w""" ) as f:
for item in DATA:
f.write(json.dumps(UpperCAmelCase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple ) ->Optional[int]:
A__ : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(UpperCAmelCase__, """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(UpperCAmelCase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : str ) ->Dict:
A__ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(UpperCAmelCase__, """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(UpperCAmelCase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Optional[Any] ) ->List[Any]:
import gzip
A__ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(UpperCAmelCase__, """rb""" ) as orig_file:
with gzip.open(UpperCAmelCase__, """wb""" ) as zipped_file:
zipped_file.writelines(UpperCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : int ) ->Dict:
import gzip
A__ : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(UpperCAmelCase__, """rb""" ) as orig_file:
with gzip.open(UpperCAmelCase__, """wb""" ) as zipped_file:
zipped_file.writelines(UpperCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Any ) ->List[str]:
A__ : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(UpperCAmelCase__, """w""" ) as f:
f.write(UpperCAmelCase__, arcname=os.path.basename(UpperCAmelCase__ ) )
f.write(UpperCAmelCase__, arcname=os.path.basename(UpperCAmelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : str, UpperCAmelCase__ : Any, UpperCAmelCase__ : Dict, UpperCAmelCase__ : Tuple ) ->Dict:
A__ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(UpperCAmelCase__, """w""" ) as f:
f.write(UpperCAmelCase__, arcname=os.path.join("""nested""", os.path.basename(UpperCAmelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : List[str], UpperCAmelCase__ : str, UpperCAmelCase__ : str ) ->Tuple:
A__ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(UpperCAmelCase__, """w""" ) as f:
f.write(UpperCAmelCase__, arcname=os.path.join("""main_dir""", os.path.basename(UpperCAmelCase__ ) ) )
f.write(UpperCAmelCase__, arcname=os.path.join("""main_dir""", os.path.basename(UpperCAmelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Dict, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Optional[int] ) ->str:
A__ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(UpperCAmelCase__, """w""" ) as f:
f.add(UpperCAmelCase__, arcname=os.path.basename(UpperCAmelCase__ ) )
f.add(UpperCAmelCase__, arcname=os.path.basename(UpperCAmelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Any ) ->Optional[int]:
A__ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(UpperCAmelCase__, """w""" ) as f:
f.add(UpperCAmelCase__, arcname=os.path.join("""nested""", os.path.basename(UpperCAmelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Dict ) ->int:
A__ : Union[str, Any] = ["""0""", """1""", """2""", """3"""]
A__ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(UpperCAmelCase__, """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : List[str] ) ->Union[str, Any]:
A__ : List[Any] = ["""0""", """1""", """2""", """3"""]
A__ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(UpperCAmelCase__, """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->Optional[int]:
A__ : str = ["""0""", """1""", """2""", """3"""]
A__ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(UpperCAmelCase__, """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : str ) ->Optional[int]:
A__ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(UpperCAmelCase__, """w""" ) as f:
f.write(UpperCAmelCase__, arcname=os.path.basename(UpperCAmelCase__ ) )
f.write(UpperCAmelCase__, arcname=os.path.basename(UpperCAmelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Optional[Any] ) ->List[str]:
A__ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(UpperCAmelCase__, """w""" ) as f:
f.write(UpperCAmelCase__, arcname=os.path.join("""main_dir""", os.path.basename(UpperCAmelCase__ ) ) )
f.write(UpperCAmelCase__, arcname=os.path.join("""main_dir""", os.path.basename(UpperCAmelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[str] ) ->Tuple:
A__ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(UpperCAmelCase__, """w""" ) as f:
f.write(UpperCAmelCase__, arcname=os.path.basename("""unsupported.ext""" ) )
f.write(UpperCAmelCase__, arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple ) ->Optional[Any]:
A__ : List[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
A__ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(UpperCAmelCase__, """w""", encoding="""utf-8""" ) as f:
f.write(UpperCAmelCase__ )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( ) ->Dict:
return os.path.join("""tests""", """features""", """data""", """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( ) ->Optional[int]:
return os.path.join("""tests""", """features""", """data""", """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Any ) ->Any:
A__ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(UpperCAmelCase__, """w""" ) as f:
f.write(UpperCAmelCase__, arcname=os.path.basename(UpperCAmelCase__ ) )
f.write(UpperCAmelCase__, arcname=os.path.basename(UpperCAmelCase__ ).replace(""".jpg""", """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->Optional[int]:
A__ : str = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""", """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """subdir""" / """test.txt""", """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""", """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""", """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """.subdir""" / """test.txt""", """w""" ) as f:
f.write("""bar\n""" * 1_0 )
return data_dir
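# (added) The dot-prefixed file and directory created above give the test
# suite hidden entries, so data-file resolution can be checked to skip them.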
| 296
|
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _lowerCAmelCase ( UpperCAmelCase__ : Sequence[float], UpperCAmelCase__ : int, UpperCAmelCase__ : int ) ->tuple[int | None, int | None, float]:
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
A__ : Optional[int] = (low + high) // 2
A__ , A__ , A__ : List[Any] = max_subarray(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
A__ , A__ , A__ : Union[str, Any] = max_subarray(UpperCAmelCase__, mid + 1, UpperCAmelCase__ )
A__ , A__ , A__ : Union[str, Any] = max_cross_sum(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def _lowerCAmelCase ( UpperCAmelCase__ : Sequence[float], UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : int ) ->tuple[int, int, float]:
A__ , A__ : Dict = float("""-inf""" ), -1
A__ , A__ : Optional[Any] = float("""-inf""" ), -1
A__ : int | float = 0
for i in range(UpperCAmelCase__, low - 1, -1 ):
summ += arr[i]
if summ > left_sum:
A__ : Optional[int] = summ
A__ : Union[str, Any] = i
A__ : Optional[Any] = 0
for i in range(mid + 1, high + 1 ):
summ += arr[i]
if summ > right_sum:
A__ : int = summ
A__ : Union[str, Any] = i
return max_left, max_right, (left_sum + right_sum)
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->float:
A__ : Union[str, Any] = [randint(1, UpperCAmelCase__ ) for _ in range(UpperCAmelCase__ )]
A__ : Any = time.time()
max_subarray(UpperCAmelCase__, 0, input_size - 1 )
A__ : List[Any] = time.time()
return end - start
def _lowerCAmelCase ( ) ->None:
A__ : List[Any] = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
A__ : Any = [time_max_subarray(UpperCAmelCase__ ) for input_size in input_sizes]
print("""No of Inputs\t\tTime Taken""" )
for input_size, runtime in zip(UpperCAmelCase__, UpperCAmelCase__ ):
print(UpperCAmelCase__, """\t\t""", UpperCAmelCase__ )
plt.plot(UpperCAmelCase__, UpperCAmelCase__ )
plt.xlabel("""Number of Inputs""" )
plt.ylabel("""Time taken in seconds""" )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
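# Note (added): max_subarray above is the O(n log n) divide-and-conquer
# formulation; Kadane's algorithm solves the same problem in O(n).
# A minimal sketch for comparison:
def kadane(arr):
    best = current = arr[0]
    for x in arr[1:]:
        current = max(x, current + x)
        best = max(best, current)
    return best

assert kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6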
| 296
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
A_ = logging.get_logger(__name__)
A_ = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
A_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
A_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case_ = 'whisper'
snake_case_ = ['past_key_values']
snake_case_ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Union[str, Any] , snake_case : Dict=5_1865 , snake_case : int=80 , snake_case : int=6 , snake_case : Tuple=4 , snake_case : Any=6 , snake_case : str=4 , snake_case : Dict=1536 , snake_case : List[Any]=1536 , snake_case : List[Any]=0.0 , snake_case : int=0.0 , snake_case : Any=5_0257 , snake_case : List[str]=True , snake_case : List[Any]=True , snake_case : List[str]="gelu" , snake_case : Union[str, Any]=256 , snake_case : List[str]=0.0 , snake_case : str=0.0 , snake_case : List[Any]=0.0 , snake_case : int=0.02 , snake_case : List[Any]=False , snake_case : List[str]=1500 , snake_case : Dict=448 , snake_case : Tuple=5_0256 , snake_case : List[str]=5_0256 , snake_case : Any=5_0256 , snake_case : Optional[int]=None , snake_case : Any=[220, 5_0256] , snake_case : Dict=False , snake_case : Dict=256 , snake_case : Dict=False , snake_case : Dict=0.05 , snake_case : Optional[int]=10 , snake_case : str=2 , snake_case : int=0.0 , snake_case : int=10 , snake_case : int=0 , snake_case : int=7 , **snake_case : Optional[Any] , ):
'''simple docstring'''
A__ : Dict = vocab_size
A__ : List[str] = num_mel_bins
A__ : str = d_model
A__ : Dict = encoder_layers
A__ : Tuple = encoder_attention_heads
A__ : Dict = decoder_layers
A__ : int = decoder_attention_heads
A__ : List[Any] = decoder_ffn_dim
A__ : List[Any] = encoder_ffn_dim
A__ : Optional[Any] = dropout
A__ : Tuple = attention_dropout
A__ : str = activation_dropout
A__ : Tuple = activation_function
A__ : List[Any] = init_std
A__ : Dict = encoder_layerdrop
A__ : Tuple = decoder_layerdrop
A__ : Dict = use_cache
A__ : List[Any] = encoder_layers
A__ : Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True
A__ : Any = max_source_positions
A__ : int = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
A__ : Any = classifier_proj_size
A__ : str = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A__ : Optional[Any] = apply_spec_augment
A__ : List[str] = mask_time_prob
A__ : List[str] = mask_time_length
A__ : Union[str, Any] = mask_time_min_masks
A__ : Optional[int] = mask_feature_prob
A__ : Union[str, Any] = mask_feature_length
A__ : Union[str, Any] = mask_feature_min_masks
A__ : Dict = median_filter_width
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , decoder_start_token_id=__a , suppress_tokens=__a , begin_suppress_tokens=__a , **__a , )
class __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
@property
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Any = OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
] )
if self.use_past:
A__ : List[str] = {0: 'batch'}
else:
A__ : Dict = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__a , direction="""inputs""" )
return common_inputs
def _UpperCamelCase ( self : Optional[int] , snake_case : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , snake_case : int = -1 , snake_case : int = -1 , snake_case : bool = False , snake_case : Optional["TensorType"] = None , snake_case : int = 2_2050 , snake_case : float = 5.0 , snake_case : int = 220 , ):
'''simple docstring'''
A__ : Union[str, Any] = OrderedDict()
A__ : Any = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=__a , framework=__a , sampling_rate=__a , time_duration=__a , frequency=__a , )
A__ : Union[str, Any] = encoder_inputs['input_features'].shape[2]
A__ : Optional[Any] = encoder_sequence_length // 2 if self.use_past else seq_length
A__ : List[Any] = super().generate_dummy_inputs(
preprocessor.tokenizer , __a , __a , __a , __a )
A__ : Any = encoder_inputs.pop("""input_features""" )
A__ : Dict = decoder_inputs.pop("""decoder_input_ids""" )
if "past_key_values" in decoder_inputs:
A__ : Union[str, Any] = decoder_inputs.pop("""past_key_values""" )
return dummy_inputs
@property
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 1e-3
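# (added) In the original source this property is `atol_for_validation`: the
# absolute tolerance used when the exported ONNX graph's outputs are compared
# against the PyTorch model's outputs.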
| 350
|
"""simple docstring"""
from __future__ import annotations
class __SCREAMING_SNAKE_CASE :
def __init__( self : Dict , snake_case : int ):
'''simple docstring'''
A__ : List[Any] = order
# a_{0} ... a_{k}
A__ : List[Any] = [1.0] + [0.0] * order
# b_{0} ... b_{k}
A__ : str = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
A__ : Union[str, Any] = [0.0] * self.order
# y[n-1] ... y[n-k]
A__ : List[str] = [0.0] * self.order
def _UpperCamelCase ( self : Optional[int] , snake_case : list[float] , snake_case : list[float] ):
'''simple docstring'''
if len(snake_case ) < self.order:
A__ : Any = [1.0, *a_coeffs]
if len(snake_case ) != self.order + 1:
A__ : str = (
F'Expected a_coeffs to have {self.order + 1} elements '
F'for {self.order}-order filter, got {len(snake_case )}'
)
raise ValueError(snake_case )
if len(snake_case ) != self.order + 1:
A__ : Union[str, Any] = (
F'Expected b_coeffs to have {self.order + 1} elements '
F'for {self.order}-order filter, got {len(snake_case )}'
)
raise ValueError(snake_case )
A__ : Dict = a_coeffs
A__ : Any = b_coeffs
def _UpperCamelCase ( self : List[str] , snake_case : float ):
'''simple docstring'''
A__ : str = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
A__ : Dict = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
A__ : Tuple = self.input_history[:-1]
A__ : int = self.output_history[:-1]
A__ : Dict = sample
A__ : Tuple = result
return result
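# Usage sketch (added; assumes the class keeps its original name `IIRFilter`
# with methods `set_coefficients` and `process`):
#
#   flt = IIRFilter(1)
#   flt.set_coefficients([1.0, -0.5], [0.5, 0.0])  # y[n] = 0.5*x[n] + 0.5*y[n-1]
#   [flt.process(x) for x in (1.0, 1.0, 1.0, 1.0)]
#   # -> [0.5, 0.75, 0.875, 0.9375], converging toward 1.0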
| 296
| 0
|
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
A_ = Lock()
def _lowerCAmelCase ( UpperCAmelCase__ : str, UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any] ) ->List[str]:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0, 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(lowercase__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
A__ : Dict = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
A__ : Optional[int] = min(lowercase__, lowercase__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(lowercase__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
A__ : List[str] = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
A__ : Optional[int] = max(lowercase__, lowercase__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(lowercase__ )
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int] ) ->Optional[Any]:
A__ : str = []
A__ : Any = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
A__ : int = Pipe()
A__ : Any = Pipe()
process_array_.append(
Process(
target=lowercase__, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) )
A__ : Union[str, Any] = temp_rs
A__ : Optional[Any] = temp_rr
for i in range(1, len(lowercase__ ) - 1 ):
A__ : Any = Pipe()
A__ : int = Pipe()
process_array_.append(
Process(
target=lowercase__, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) )
A__ : Dict = temp_rs
A__ : Any = temp_rr
process_array_.append(
Process(
target=lowercase__, args=(
len(lowercase__ ) - 1,
arr[len(lowercase__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(lowercase__ ) - 1],
), ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0, len(lowercase__ ) ):
A__ : Dict = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def _lowerCAmelCase ( ) ->Union[str, Any]:
A__ : Any = list(range(1_0, 0, -1 ) )
print("""Initial List""" )
print(*lowercase__ )
A__ : Tuple = odd_even_transposition(lowercase__ )
print("""Sorted List\n""" )
print(*lowercase__ )
if __name__ == "__main__":
main()
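# Sequential reference (added) of the same algorithm, handy for sanity-checking
# the process-per-element version above:
def odd_even_transposition_serial(arr):
    n = len(arr)
    for phase in range(n):
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition_serial([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]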
| 351
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , snake_case : Optional[Any] , snake_case : Tuple=13 , snake_case : Dict=7 , snake_case : Optional[int]=True , snake_case : Union[str, Any]=True , snake_case : Dict=True , snake_case : Any=True , snake_case : List[str]=99 , snake_case : str=64 , snake_case : Optional[int]=5 , snake_case : str=4 , snake_case : List[Any]=37 , snake_case : Optional[Any]="gelu" , snake_case : List[str]=0.1 , snake_case : str=0.1 , snake_case : Optional[int]=512 , snake_case : Dict=16 , snake_case : List[Any]=2 , snake_case : Optional[int]=0.02 , snake_case : Any=3 , snake_case : Union[str, Any]=4 , snake_case : Dict=None , ):
'''simple docstring'''
A__ : Tuple = parent
A__ : Union[str, Any] = batch_size
A__ : List[str] = seq_length
A__ : Optional[int] = is_training
A__ : Dict = use_input_mask
A__ : Any = use_token_type_ids
A__ : Optional[Any] = use_labels
A__ : List[str] = vocab_size
A__ : Optional[int] = hidden_size
A__ : Optional[Any] = num_hidden_layers
A__ : Any = num_attention_heads
A__ : List[Any] = intermediate_size
A__ : Optional[Any] = hidden_act
A__ : Optional[int] = hidden_dropout_prob
A__ : Tuple = attention_probs_dropout_prob
A__ : str = max_position_embeddings
A__ : List[str] = type_vocab_size
A__ : Union[str, Any] = type_sequence_label_size
A__ : List[Any] = initializer_range
A__ : Optional[int] = num_labels
A__ : Dict = num_choices
A__ : Dict = scope
A__ : List[Any] = vocab_size - 1
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : List[Any] = None
if self.use_input_mask:
A__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Union[str, Any] = None
if self.use_labels:
A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Tuple = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ , A__ , A__ , A__ : str = self.prepare_config_and_inputs()
A__ : Union[str, Any] = True
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self : Union[str, Any] , snake_case : Optional[int] , snake_case : List[str] , snake_case : int ):
'''simple docstring'''
A__ : Any = GPTNeoXModel(config=snake_case )
model.to(snake_case )
model.eval()
A__ : int = model(snake_case , attention_mask=snake_case )
A__ : Optional[int] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Union[str, Any] , snake_case : str , snake_case : Any , snake_case : Union[str, Any] ):
'''simple docstring'''
A__ : int = True
A__ : str = GPTNeoXModel(snake_case )
model.to(snake_case )
model.eval()
A__ : Tuple = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Dict , snake_case : List[Any] , snake_case : str , snake_case : Optional[Any] , snake_case : Any ):
'''simple docstring'''
A__ : Any = GPTNeoXForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Tuple = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : List[str] , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : Tuple ):
'''simple docstring'''
A__ : int = self.num_labels
A__ : int = GPTNeoXForQuestionAnswering(snake_case )
model.to(snake_case )
model.eval()
A__ : Optional[Any] = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : str , snake_case : Tuple , snake_case : int , snake_case : int , snake_case : Dict ):
'''simple docstring'''
A__ : List[Any] = self.num_labels
A__ : Tuple = GPTNeoXForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : List[str] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : Any , snake_case : Union[str, Any] , snake_case : int , snake_case : Tuple , snake_case : Any ):
'''simple docstring'''
A__ : Tuple = self.num_labels
A__ : Any = GPTNeoXForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : Dict = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : List[str] , snake_case : List[str] , snake_case : Tuple , snake_case : Any ):
'''simple docstring'''
A__ : Optional[int] = True
A__ : Any = GPTNeoXForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
# first forward pass
A__ : Tuple = model(snake_case , attention_mask=snake_case , use_cache=snake_case )
A__ : str = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids
A__ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
A__ : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
A__ : Tuple = model(snake_case , attention_mask=snake_case , output_hidden_states=snake_case )
A__ : List[Any] = output_from_no_past["""hidden_states"""][0]
A__ : List[str] = model(
snake_case , attention_mask=snake_case , past_key_values=snake_case , output_hidden_states=snake_case , )["""hidden_states"""][0]
# select random slice
A__ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : str = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ : Dict = config_and_inputs
A__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
snake_case_ = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (GPTNeoXForCausalLM,) if is_torch_available() else ()
snake_case_ = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model(self):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        '''simple docstring'''
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
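    # For reference, `rope_scaling` is plain config data; a hedged sketch of enabling it
    # outside the test (the factor value here is arbitrary):
    #   config = GPTNeoXConfig(rope_scaling={"type": "dynamic", "factor": 2.0})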
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_pythia_410m_deduped(self):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]
            self.assertEqual(output_str, expected_output)
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^\"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}')

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f'{key} backend'
            errors.append(f'Differences for {name}:')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f'  {a} in TYPE_HINT but not in _import_structure.')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f'  {a} in _import_structure but not in TYPE_HINT.')
    return errors
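# Worked example (hypothetical inputs): a symbol present only under TYPE_CHECKING is
# reported as a readable diff rather than a bare assertion failure:
#   analyze_results({"none": ["A"]}, {"none": ["A", "B"]})
#   -> ["Differences for base imports:", "  B in TYPE_HINT but not in _import_structure."]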
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f'- {module}' for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f'{list_of_modules}\n'
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
"""simple docstring"""
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
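# Why `2 * euclid_m * (euclid_m + euclid_n)` is the primitive perimeter: Euclid's formula
# gives the sides a = m^2 - n^2, b = 2*m*n, c = m^2 + n^2, so
# a + b + c = 2*m^2 + 2*m*n = 2*m*(m + n). For m = 2, n = 1 this is the (3, 4, 5)
# triangle with perimeter 12.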
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        '''simple docstring'''
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        '''simple docstring'''
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        '''simple docstring'''
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        '''simple docstring'''
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f'Unsupported framework: {self.framework}')
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]):
        '''simple docstring'''
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int):
        '''simple docstring'''
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self):
        '''simple docstring'''
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
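# Tiny self-check of the API above (`_demo_graph` is illustrative, not from the original
# file): in a triangle with weights 1, 2 and 3, the minimum spanning tree keeps the two
# cheapest edges, total weight 3.
_demo_graph = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
assert sum(_demo_graph.prims_algorithm().edges.values()) == 3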
def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        '''simple docstring'''
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        '''simple docstring'''
        return str(self.k)
    def detect(self, img_path: str):
        '''simple docstring'''
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
A_ = HarrisCorner(0.04, 3)
A_ , A_ = edge_detect.detect('''path_to_image''')
cva.imwrite('''detect.png''', color_img)
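# For reference, the response computed in `detect` is the standard Harris measure
#   R = det(M) - k * trace(M)^2,  with M = [[Ixx, Ixy], [Ixy, Iyy]] summed over the window:
# corners give large positive R, edges give negative R, and flat regions give |R| close to 0.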
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101_100_111_110_110_010_010_000_011_110_111_011_000_110_011_110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        '''simple docstring'''
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        '''simple docstring'''
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
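# Hedged sketch of the matching read path (the decoder class comes from the same
# `imwatermark` package; treat the exact call as an assumption, not part of this file):
#   from imwatermark import WatermarkDecoder
#   decoder = WatermarkDecoder("bits", len(WATERMARK_BITS))
#   recovered_bits = decoder.decode(image_bgr_uint8, "dwtDct")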
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))

    def _sanitize_parameters(self, **kwargs):
        '''simple docstring'''
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs):
        '''simple docstring'''
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        '''simple docstring'''
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        '''simple docstring'''
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        '''simple docstring'''
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]))

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        '''simple docstring'''
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
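# Hedged usage sketch (checkpoint name is an example, not mandated by this file):
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("street.jpg", threshold=0.9)
#   -> [{"score": 0.99, "label": "car", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]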
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
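# Minimal sketch of the defaulting rules implemented above:
#   config = RwkvConfig(hidden_size=256, num_hidden_layers=4)
#   config.attention_hidden_size  -> 256   (falls back to hidden_size)
#   config.intermediate_size      -> 1024  (falls back to 4 * hidden_size)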
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        '''simple docstring'''
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 12
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        '''simple docstring'''
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        '''simple docstring'''
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 13
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq
    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        '''simple docstring'''
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        '''simple docstring'''
        return self.model.generate(**inputs)

    def decode(self, outputs):
        '''simple docstring'''
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
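# Hedged usage sketch (agent-style invocation; the PIL image is an assumption):
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   caption = tool(Image.open("photo.png"))  # -> an English description string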
"""simple docstring"""
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        '''simple docstring'''
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        '''simple docstring'''
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        '''simple docstring'''
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        '''simple docstring'''
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        '''simple docstring'''
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        '''simple docstring'''
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f'{key}.dat')
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        '''simple docstring'''
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})
                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        '''simple docstring'''
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        '''simple docstring'''
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})
        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
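# For reference, the on-disk layout exercised above looks like this (illustrative values):
#   tmp_dir/index.json          -> {"linear1.weight": {"dtype": "float32", "shape": [4, 3]}, ...}
#   tmp_dir/linear1.weight.dat  -> raw tensor bytes, reloadable via `load_offloaded_weight`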
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
A_ = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
A_ = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
A_ = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
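# Worked example for precision_at_10: with identical English and Indic vectors the
# diagonal of the cosine-distance matrix is 0, so every query ranks its own index first
# and the score is 1.0 (this mirrors the 'cvit-mkb-clsr' doctest above).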
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
'''simple docstring'''
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
"""references""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , )
    def _compute(self, predictions, references):
        '''simple docstring'''
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]')
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        '''simple docstring'''
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        '''simple docstring'''
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def _UpperCamelCase ( self : Any , snake_case : str , snake_case : Tuple , snake_case : int , snake_case : Optional[Any] , snake_case : Any , *snake_case : Dict ):
'''simple docstring'''
A__ : Union[str, Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
# create attention mask
A__ : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
A__ : Any = self.seq_length // 2
A__ : str = 0
# first forward pass
A__ , A__ : List[Any] = model(snake_case , attention_mask=snake_case ).to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : int = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
A__ : List[str] = ids_tensor((1,) , snake_case ).item() + 1
A__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
A__ : int = random_other_next_tokens
# append to next input_ids and attn_mask
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : List[Any] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=snake_case )] , dim=1 , )
# get two different outputs
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Optional[int] = model(snake_case , past_key_values=snake_case , attention_mask=snake_case )["""last_hidden_state"""]
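        # If the KV cache is consistent, feeding only the new token together with
        # `past_key_values` must reproduce the hidden state that the full forward
        # pass computes at that same position; the slice comparison below checks this.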
# select random slice
A__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
A__ : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : str , snake_case : int , snake_case : Optional[Any] , *snake_case : str ):
'''simple docstring'''
A__ : Dict = BioGptModel(config=snake_case ).to(snake_case ).eval()
A__ : Tuple = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
# first forward pass
A__ : Dict = model(snake_case , attention_mask=snake_case , use_cache=snake_case )
A__ , A__ : List[Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
A__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : int = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : Optional[int] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A__ : Any = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , past_key_values=snake_case )[
"""last_hidden_state"""
]
# select random slice
A__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : Any , snake_case : Tuple , *snake_case : Union[str, Any] , snake_case : Union[str, Any]=False ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM(snake_case )
model.to(snake_case )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
A__ : Optional[Any] = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , *snake_case : Optional[int] ):
'''simple docstring'''
A__ : int = BioGptModel(snake_case )
A__ : Union[str, Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _UpperCamelCase ( self : Any , snake_case : Dict , snake_case : Tuple , snake_case : int , snake_case : Union[str, Any] , snake_case : Dict , *snake_case : Any ):
'''simple docstring'''
A__ : Union[str, Any] = self.num_labels
A__ : int = BioGptForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
snake_case_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
snake_case_ = (BioGptForCausalLM,) if is_torch_available() else ()
snake_case_ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : List[str] = BioGptModelTester(self )
A__ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ : str = type
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*snake_case )
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*snake_case , gradient_checkpointing=snake_case )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*snake_case )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*snake_case )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*snake_case )
@slow
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
A__ : Optional[int] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = """left"""
        # use the EOS token as PAD token (BioGPT has no dedicated padding token)
A__ : Optional[int] = tokenizer.eos_token
A__ : Dict = model.config.eos_token_id
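        # With left padding and pad == eos, the prompt endings line up across the batch,
        # so the batched generations below should decode to the same text as the
        # per-sentence (non-padded) generations.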
# use different length sentences to test batching
A__ : Union[str, Any] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
A__ : List[str] = tokenizer(snake_case , return_tensors="""pt""" , padding=snake_case )
A__ : str = inputs["""input_ids"""].to(snake_case )
A__ : Dict = model.generate(
input_ids=snake_case , attention_mask=inputs["""attention_mask"""].to(snake_case ) , )
A__ : Optional[int] = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Any = model.generate(input_ids=snake_case )
A__ : List[str] = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
A__ : str = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Dict = model.generate(input_ids=snake_case , max_length=model.config.max_length - num_paddings )
A__ : Optional[Any] = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
A__ : List[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case )
A__ : str = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case )
A__ : Optional[int] = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(snake_case , [non_padded_sentence, padded_sentence] )
@slow
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Optional[Any] = BioGptModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]
        vocab_size = 42384
        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : Tuple = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
torch.manual_seed(0 )
A__ : Tuple = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(snake_case )
A__ : Optional[int] = model.generate(
**snake_case , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=snake_case , )
A__ : Optional[int] = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case )
A__ : List[str] = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(snake_case , snake_case )
| 296
| 0
|
"""simple docstring"""
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    # closed form for the sum of an arithmetic series: S = n/2 * (2a + (n - 1) * d)
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
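# Illustrative sanity check (not part of the original script): the closed form should
# agree with a brute-force sum; for the series 1, 3, 5, 7, 9 (a=1, d=2, n=5) both give 25.
assert sum_of_series(1, 2, 5) == sum(range(1, 10, 2))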
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 360
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # the mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token,
            eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str):
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (sub-word strings) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 296
| 0
|
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n, found by trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
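# Worked example: 600851475143 = 71 * 839 * 1471 * 6857, so solution() returns 6857;
# for the smaller Project Euler sample, 13195 = 5 * 7 * 13 * 29, hence:
assert solution(13195) == 29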
if __name__ == "__main__":
print(F'{solution() = }')
| 361
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any] ) ->List[str]:
A__ : Union[str, Any] = DPTConfig()
if "large" in checkpoint_url:
A__ : int = 1_0_2_4
A__ : Union[str, Any] = 4_0_9_6
A__ : Optional[int] = 2_4
A__ : int = 1_6
A__ : Union[str, Any] = [5, 1_1, 1_7, 2_3]
A__ : Tuple = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4]
A__ : Tuple = (1, 3_8_4, 3_8_4)
if "ade" in checkpoint_url:
A__ : Optional[int] = True
A__ : int = 1_5_0
A__ : Union[str, Any] = """huggingface/label-files"""
A__ : List[Any] = """ade20k-id2label.json"""
A__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(UpperCAmelCase__, UpperCAmelCase__, repo_type="""dataset""" ) ), """r""" ) )
A__ : List[Any] = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
A__ : Dict = idalabel
A__ : List[Any] = {v: k for k, v in idalabel.items()}
A__ : Optional[Any] = [1, 1_5_0, 4_8_0, 4_8_0]
return config, expected_shape
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->Any:
A__ : List[Any] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__, UpperCAmelCase__ )
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any] ) ->List[str]:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
A__ : str = name.replace("""pretrained.model""", """dpt.encoder""" )
if "pretrained.model" in name:
A__ : Dict = name.replace("""pretrained.model""", """dpt.embeddings""" )
if "patch_embed" in name:
A__ : List[Any] = name.replace("""patch_embed""", """patch_embeddings""" )
if "pos_embed" in name:
A__ : int = name.replace("""pos_embed""", """position_embeddings""" )
if "attn.proj" in name:
A__ : Tuple = name.replace("""attn.proj""", """attention.output.dense""" )
if "proj" in name and "project" not in name:
A__ : List[Any] = name.replace("""proj""", """projection""" )
if "blocks" in name:
A__ : Optional[Any] = name.replace("""blocks""", """layer""" )
if "mlp.fc1" in name:
A__ : int = name.replace("""mlp.fc1""", """intermediate.dense""" )
if "mlp.fc2" in name:
A__ : List[str] = name.replace("""mlp.fc2""", """output.dense""" )
if "norm1" in name:
A__ : Any = name.replace("""norm1""", """layernorm_before""" )
if "norm2" in name:
A__ : List[str] = name.replace("""norm2""", """layernorm_after""" )
if "scratch.output_conv" in name:
A__ : Optional[int] = name.replace("""scratch.output_conv""", """head""" )
if "scratch" in name:
A__ : List[str] = name.replace("""scratch""", """neck""" )
if "layer1_rn" in name:
A__ : List[str] = name.replace("""layer1_rn""", """convs.0""" )
if "layer2_rn" in name:
A__ : Optional[int] = name.replace("""layer2_rn""", """convs.1""" )
if "layer3_rn" in name:
A__ : Any = name.replace("""layer3_rn""", """convs.2""" )
if "layer4_rn" in name:
A__ : Any = name.replace("""layer4_rn""", """convs.3""" )
if "refinenet" in name:
A__ : Union[str, Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
A__ : str = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx-4 )}' )
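        # e.g. "refinenet1" (layer_idx = 1) becomes "fusion_stage.layers.3", since abs(1 - 4) == 3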
if "out_conv" in name:
A__ : Optional[Any] = name.replace("""out_conv""", """projection""" )
if "resConfUnit1" in name:
A__ : List[Any] = name.replace("""resConfUnit1""", """residual_layer1""" )
if "resConfUnit2" in name:
A__ : Tuple = name.replace("""resConfUnit2""", """residual_layer2""" )
if "conv1" in name:
A__ : Tuple = name.replace("""conv1""", """convolution1""" )
if "conv2" in name:
A__ : List[Any] = name.replace("""conv2""", """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
A__ : Union[str, Any] = name.replace("""pretrained.act_postprocess1.0.project.0""", """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
A__ : Tuple = name.replace("""pretrained.act_postprocess2.0.project.0""", """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
A__ : Optional[Any] = name.replace("""pretrained.act_postprocess3.0.project.0""", """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
A__ : Optional[Any] = name.replace("""pretrained.act_postprocess4.0.project.0""", """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
A__ : Any = name.replace("""pretrained.act_postprocess1.3""", """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
A__ : List[Any] = name.replace("""pretrained.act_postprocess1.4""", """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
A__ : Dict = name.replace("""pretrained.act_postprocess2.3""", """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
A__ : Optional[Any] = name.replace("""pretrained.act_postprocess2.4""", """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
A__ : Union[str, Any] = name.replace("""pretrained.act_postprocess3.3""", """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
A__ : Optional[int] = name.replace("""pretrained.act_postprocess4.3""", """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
A__ : Dict = name.replace("""pretrained.act_postprocess4.4""", """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
A__ : Union[str, Any] = name.replace("""pretrained""", """dpt""" )
if "bn" in name:
A__ : Union[str, Any] = name.replace("""bn""", """batch_norm""" )
if "head" in name:
A__ : Dict = name.replace("""head""", """head.head""" )
if "encoder.norm" in name:
A__ : Optional[int] = name.replace("""encoder.norm""", """layernorm""" )
if "auxlayer" in name:
A__ : List[str] = name.replace("""auxlayer""", """auxiliary_head.head""" )
return name
def _lowerCAmelCase ( UpperCAmelCase__ : int, UpperCAmelCase__ : Dict ) ->str:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
A__ : Tuple = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A__ : List[str] = in_proj_weight[: config.hidden_size, :]
A__ : int = in_proj_bias[: config.hidden_size]
A__ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : str = in_proj_weight[
-config.hidden_size :, :
]
A__ : Optional[Any] = in_proj_bias[-config.hidden_size :]
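# Layout note (timm convention): the fused qkv weight has shape (3 * hidden_size, hidden_size);
# rows [0:h] hold the query projection, [h:2h] the key, and [2h:3h] the value, which is exactly
# how the slices above split it into separate HF query/key/value parameters.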
def _lowerCAmelCase ( ) ->List[str]:
A__ : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : int = Image.open(requests.get(UpperCAmelCase__, stream=UpperCAmelCase__ ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( UpperCAmelCase__ : int, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : str, UpperCAmelCase__ : int ) ->str:
A__ , A__ : Dict = get_dpt_config(UpperCAmelCase__ )
# load original state_dict from URL
A__ : Any = torch.hub.load_state_dict_from_url(UpperCAmelCase__, map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(UpperCAmelCase__ )
# rename keys
for key in state_dict.copy().keys():
A__ : int = state_dict.pop(UpperCAmelCase__ )
A__ : str = val
# read in qkv matrices
read_in_q_k_v(UpperCAmelCase__, UpperCAmelCase__ )
# load HuggingFace model
A__ : Optional[Any] = DPTForSemanticSegmentation(UpperCAmelCase__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(UpperCAmelCase__ )
model.load_state_dict(UpperCAmelCase__ )
model.eval()
# Check outputs on an image
A__ : Optional[Any] = 4_8_0 if """ade""" in checkpoint_url else 3_8_4
A__ : Dict = DPTImageProcessor(size=UpperCAmelCase__ )
A__ : Optional[int] = prepare_img()
A__ : Any = image_processor(UpperCAmelCase__, return_tensors="""pt""" )
# forward pass
A__ : List[str] = model(**UpperCAmelCase__ ).logits if """ade""" in checkpoint_url else model(**UpperCAmelCase__ ).predicted_depth
# Assert logits
A__ : Optional[Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
if "ade" in checkpoint_url:
A__ : Optional[int] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
assert outputs.shape == torch.Size(UpperCAmelCase__ )
assert (
torch.allclose(outputs[0, 0, :3, :3], UpperCAmelCase__, atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3], UpperCAmelCase__ )
)
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCAmelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCAmelCase__ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase__, UpperCAmelCase__ ), organization="""nielsr""", commit_message="""Add model""", use_temp_dir=UpperCAmelCase__, )
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase__, UpperCAmelCase__ ), organization="""nielsr""", commit_message="""Add image processor""", use_temp_dir=UpperCAmelCase__, )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
A_ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 296
| 0
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = field(default=UpperCamelCase , metadata={'help': 'Whether to use SortishSampler or not.'} )
snake_case_ = field(
default=UpperCamelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
snake_case_ = field(
default=UpperCamelCase , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
snake_case_ = field(
default=UpperCamelCase , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
snake_case_ = field(
default=UpperCamelCase , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
    def to_dict(self):
        """Serializes this instance, replacing any `GenerationConfig` member by its own dict."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 362
|
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
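# Example: with indent == "    ", a dedented line ") -> torch.Tensor:" still counts as part of
# the current block because it matches the closing-signature regex, as do blank and indented lines.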
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` inside the diffusers repo."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply black formatting to some code (also handles snippets indented inside a class)."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
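# Usage note: is_copy_consistent below re-formats the "theoretical" copy with blackify after
# applying `with X->Y` renames; the dummy `class Bla:` wrapper lets black parse indented snippets.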
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the file content if `overwrite=True`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
A_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 296
| 0
|
"""simple docstring"""
import logging
from transformers import PretrainedConfig
A_ = logging.getLogger(__name__)
A_ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 363
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
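    # Note on the lazy pattern: symbols listed in `_import_structure` (e.g. `LlamaConfig`)
    # resolve on demand, so torch-backed classes are only imported on first attribute access.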
| 296
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Union[str, Any]=False, UpperCAmelCase__ : str=False, UpperCAmelCase__ : List[Any]=False ) ->List[str]:
A__ : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'transformer.blocks.{i}.norm1.weight', f'vilt.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'transformer.blocks.{i}.norm1.bias', f'vilt.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'transformer.blocks.{i}.attn.proj.weight', f'vilt.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'transformer.blocks.{i}.attn.proj.bias', f'vilt.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'transformer.blocks.{i}.norm2.weight', f'vilt.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'transformer.blocks.{i}.norm2.bias', f'vilt.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'transformer.blocks.{i}.mlp.fc1.weight', f'vilt.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc1.bias', f'vilt.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.weight', f'vilt.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.bias', f'vilt.encoder.layer.{i}.output.dense.bias') )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
def _lowerCAmelCase ( UpperCAmelCase__ : str, UpperCAmelCase__ : List[str] ) ->List[Any]:
for i in range(config.num_hidden_layers ):
A__ : Optional[Any] = """vilt."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : Tuple = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.weight' )
A__ : Optional[int] = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A__ : int = in_proj_weight[
: config.hidden_size, :
]
A__ : List[str] = in_proj_bias[: config.hidden_size]
A__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
A__ : List[Any] = in_proj_bias[-config.hidden_size :]
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any] ) ->Dict:
A__ : Optional[Any] = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(a_, a_ )
def _lowerCAmelCase ( UpperCAmelCase__ : Any, UpperCAmelCase__ : Dict, UpperCAmelCase__ : Dict ) ->Any:
A__ : Tuple = dct.pop(a_ )
A__ : Union[str, Any] = val
@torch.no_grad()
def _lowerCAmelCase ( UpperCAmelCase__ : Any, UpperCAmelCase__ : str ) ->Tuple:
A__ : List[str] = ViltConfig(image_size=3_8_4, patch_size=3_2, tie_word_embeddings=a_ )
A__ : List[Any] = False
A__ : List[str] = False
A__ : Optional[Any] = False
A__ : Any = False
if "vqa" in checkpoint_url:
A__ : str = True
A__ : Optional[int] = 3_1_2_9
A__ : Union[str, Any] = """huggingface/label-files"""
A__ : Dict = """vqa2-id2label.json"""
A__ : List[str] = json.load(open(hf_hub_download(a_, a_, repo_type="""dataset""" ), """r""" ) )
A__ : List[str] = {int(a_ ): v for k, v in idalabel.items()}
A__ : Union[str, Any] = idalabel
A__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
A__ : Dict = ViltForQuestionAnswering(a_ )
elif "nlvr" in checkpoint_url:
A__ : Union[str, Any] = True
A__ : int = 2
A__ : Optional[int] = {0: """False""", 1: """True"""}
A__ : int = {v: k for k, v in config.idalabel.items()}
A__ : Optional[int] = 3
A__ : Optional[Any] = ViltForImagesAndTextClassification(a_ )
elif "irtr" in checkpoint_url:
A__ : List[Any] = True
A__ : List[str] = ViltForImageAndTextRetrieval(a_ )
elif "mlm_itm" in checkpoint_url:
A__ : int = True
A__ : str = ViltForMaskedLM(a_ )
else:
raise ValueError("""Unknown model type""" )
# load state_dict of original model, remove and rename some keys
A__ : Union[str, Any] = torch.hub.load_state_dict_from_url(a_, map_location="""cpu""" )["""state_dict"""]
A__ : List[str] = create_rename_keys(a_, a_, a_, a_ )
for src, dest in rename_keys:
rename_key(a_, a_, a_ )
read_in_q_k_v(a_, a_ )
if mlm_model or irtr_model:
A__ : List[str] = ["""itm_score.fc.weight""", """itm_score.fc.bias"""]
for k in ignore_keys:
state_dict.pop(a_, a_ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
A__ , A__ : Any = model.load_state_dict(a_, strict=a_ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(a_ )
# Define processor
A__ : List[Any] = ViltImageProcessor(size=3_8_4 )
A__ : Dict = BertTokenizer.from_pretrained("""bert-base-uncased""" )
A__ : Dict = ViltProcessor(a_, a_ )
# Forward pass on example inputs (image + text)
if nlvr_model:
A__ : Union[str, Any] = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""", stream=a_ ).raw )
A__ : str = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""", stream=a_ ).raw )
A__ : Optional[Any] = (
"""The left image contains twice the number of dogs as the right image, and at least two dogs in total are"""
""" standing."""
)
A__ : Optional[Any] = processor(a_, a_, return_tensors="""pt""" )
A__ : List[str] = processor(a_, a_, return_tensors="""pt""" )
A__ : int = model(
input_ids=encoding_a.input_ids, pixel_values=encoding_a.pixel_values, pixel_values_a=encoding_a.pixel_values, )
else:
A__ : Optional[int] = Image.open(requests.get("""http://images.cocodataset.org/val2017/000000039769.jpg""", stream=a_ ).raw )
if mlm_model:
A__ : Optional[Any] = """a bunch of [MASK] laying on a [MASK]."""
else:
A__ : Optional[int] = """How many cats are there?"""
A__ : List[str] = processor(a_, a_, return_tensors="""pt""" )
A__ : List[str] = model(**a_ )
# Verify outputs
if mlm_model:
A__ : Optional[int] = torch.Size([1, 1_1, 3_0_5_2_2] )
A__ : str = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3], a_, atol=1e-4 )
# verify masked token prediction equals "cats"
A__ : Tuple = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
A__ : Optional[Any] = torch.Size([1, 3_1_2_9] )
A__ : Dict = torch.tensor([-15.9495, -18.1472, -10.3041] )
assert torch.allclose(outputs.logits[0, :3], a_, atol=1e-4 )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3], a_, atol=1e-4 )
# verify vqa prediction equals "2"
A__ : str = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
A__ : str = torch.Size([1, 2] )
A__ : Union[str, Any] = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3], a_, atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(a_ ).mkdir(exist_ok=a_ )
print(f'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(a_ )
processor.save_pretrained(a_ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 364
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if regexes in qs match any contiguous window of strings in the tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # windows of len(qts) in ks, checking at each window whether every query matches its key
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
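# Example: _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
# is True, because the query patterns fully match a contiguous window of the key tuple.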
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
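# Minimal usage sketch (names illustrative): flattening a flax GPT-style parameter tree yields
# keys like ("transformer", "wte", "embedding"); set_partitions maps such a leaf to P("mp", None)
# via the rules above, and any leaf still `_unmatched` trips the assertion before freezing.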
| 296
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __SCREAMING_SNAKE_CASE :
snake_case_ = 42 # [batch_size x 3]
snake_case_ = 42 # [batch_size x 3]
snake_case_ = 42 # [batch_size x 3]
snake_case_ = 42 # [batch_size x 3]
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """
        :return: coords of shape (height * width, 2)
        """
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords
    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays
    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)
    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view, assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
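# Usage sketch (hypothetical resolution, not part of the original file): build
# the 20-camera pan rig above and inspect the ray bundle; with shape=(1, 20),
# `camera_rays` stacks ray origins and directions along dim 2.
#
#   cameras = create_pan_cameras(64)   # width == height == 64
#   rays = cameras.camera_rays         # torch.Size([1, 20 * 64 * 64, 2, 3])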
| 365
|
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _UpperCamelCase ( self : Any , snake_case : Any , snake_case : Tuple , snake_case : Any , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Dict ):
'''simple docstring'''
A__ : List[str] = MraModel(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Dict = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
A__ : List[str] = model(snake_case , token_type_ids=snake_case )
A__ : Union[str, Any] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Optional[Any] , snake_case : List[Any] , snake_case : Any , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Dict , snake_case : str , snake_case : Dict , snake_case : str , ):
'''simple docstring'''
A__ : Dict = True
A__ : Optional[Any] = MraModel(snake_case )
model.to(snake_case )
model.eval()
A__ : Union[str, Any] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
A__ : str = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , encoder_hidden_states=snake_case , )
A__ : Optional[int] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : int , snake_case : str , snake_case : Union[str, Any] , snake_case : Dict , snake_case : List[str] ):
'''simple docstring'''
A__ : Union[str, Any] = MraForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
A__ : List[Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Dict , snake_case : Dict , snake_case : Dict , snake_case : List[str] , snake_case : List[str] , snake_case : Tuple , snake_case : Union[str, Any] ):
'''simple docstring'''
A__ : Dict = MraForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : Tuple , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Optional[int] , snake_case : List[str] , snake_case : Union[str, Any] ):
'''simple docstring'''
A__ : str = self.num_labels
A__ : Optional[Any] = MraForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : Union[str, Any] , snake_case : Dict , snake_case : str , snake_case : List[Any] , snake_case : Any , snake_case : Dict , snake_case : Tuple , snake_case : Optional[Any] ):
'''simple docstring'''
A__ : str = self.num_labels
A__ : Union[str, Any] = MraForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : Tuple , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : int , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Dict , snake_case : Optional[Any] ):
'''simple docstring'''
A__ : List[str] = self.num_choices
A__ : str = MraForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
A__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : str = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
snake_case_ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ : List[str] = type
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
@slow
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : str = MraModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@unittest.skip(reason="""MRA does not output attentions""" )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 296
| 0
|
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
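# Usage note (sketch): with `note_seq` missing, importing MidiProcessor still
# succeeds; only instantiating it (or calling its classmethods) raises an
# ImportError via requires_backends.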
| 366
|
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error (MSE) is the average of the squared differences between the
predicted and actual values: MSE = (1/n) * sum_i (y_i - yhat_i)^2.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 296
| 0
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels
    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
snake_case_ = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    img = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return img
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
@slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
@slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
@slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
| 367
|
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
A_ = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 296
| 0
|
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_0_0_0_0_0_0) -> int:
    """Return the sum of all numbers below `limit` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
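# Worked example: 585 is a palindrome in base 10, and bin(585) == "0b1001001001",
# whose digit part "1001001001" is also a palindrome, so 585 contributes to the sum.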
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 368
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : List[Any] = feat_extract_first.save_pretrained(snake_case )[0]
check_json_file_has_correct_format(snake_case )
A__ : Union[str, Any] = self.feature_extraction_class.from_pretrained(snake_case )
A__ : str = feat_extract_first.to_dict()
A__ : Union[str, Any] = feat_extract_second.to_dict()
A__ : List[Any] = feat_extract_first.mel_filters
A__ : Optional[Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(snake_case , snake_case ) )
self.assertEqual(snake_case , snake_case )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = os.path.join(snake_case , """feat_extract.json""" )
feat_extract_first.to_json_file(snake_case )
A__ : int = self.feature_extraction_class.from_json_file(snake_case )
A__ : Dict = feat_extract_first.to_dict()
A__ : str = feat_extract_second.to_dict()
A__ : str = feat_extract_first.mel_filters
A__ : Dict = feat_extract_second.mel_filters
self.assertTrue(np.allclose(snake_case , snake_case ) )
self.assertEqual(snake_case , snake_case )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A__ : Union[str, Any] = [np.asarray(snake_case ) for speech_input in speech_inputs]
# Test feature size
A__ : Dict = feature_extractor(snake_case , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
A__ : str = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
A__ : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
# Test batched
A__ : List[str] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
A__ : List[str] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case ):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A__ : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A__ : str = np.asarray(snake_case )
A__ : List[str] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
A__ : Optional[int] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case ):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
# Test truncation required
A__ : Optional[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
A__ : Union[str, Any] = [np.asarray(snake_case ) for speech_input in speech_inputs]
A__ : Union[str, Any] = [x[: feature_extractor.n_samples] for x in speech_inputs]
A__ : str = [np.asarray(snake_case ) for speech_input in speech_inputs_truncated]
A__ : Optional[int] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
A__ : str = feature_extractor(snake_case , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case ):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue

        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 296
| 0
|
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class MaMaaaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
def _UpperCamelCase ( self : Union[str, Any] , snake_case : str , snake_case : Optional[int] ):
'''simple docstring'''
A__ : Optional[Any] = MaMaaaModel(config=_lowerCamelCase ).get_decoder().to(_lowerCamelCase ).eval()
A__ : int = inputs_dict['''input_ids''']
A__ : str = inputs_dict['''attention_mask''']
A__ : int = inputs_dict['''head_mask''']
# first forward pass
A__ : Optional[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , head_mask=_lowerCamelCase , use_cache=_lowerCamelCase )
A__ : int = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
A__ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : List[Any] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
A__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : Optional[int] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A__ : str = model(_lowerCamelCase , attention_mask=_lowerCamelCase )['''last_hidden_state''']
A__ : Dict = model(_lowerCamelCase , attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase )[
'''last_hidden_state'''
]
# select random slice
A__ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-2 ) )
def _UpperCamelCase ( self : Dict , snake_case : List[str] , snake_case : List[Any] ):
'''simple docstring'''
A__ : List[Any] = MaMaaaModel(config=_lowerCamelCase ).to(_lowerCamelCase ).eval()
A__ : Tuple = model(**_lowerCamelCase )
A__ : Any = outputs.encoder_last_hidden_state
A__ : Optional[int] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : str = model.get_encoder()
encoder.save_pretrained(_lowerCamelCase )
A__ : List[str] = MaMaaaEncoder.from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
A__ : int = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : List[Any] = model.get_decoder()
decoder.save_pretrained(_lowerCamelCase )
A__ : int = MaMaaaDecoder.from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
A__ : List[Any] = decoder(
input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
A__ : Dict = model_class(_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCamelCase )
A__ : str = model_class.from_pretrained(_lowerCamelCase , output_loading_info=_lowerCamelCase )
self.assertEqual(info["""missing_keys"""] , [] )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*_lowerCamelCase )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_lowerCamelCase )
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
A__ : Any = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A__ : Optional[int] = copy.deepcopy(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
if not self.is_encoder_decoder:
A__ : int = inputs['''input_ids''']
del inputs["input_ids"]
else:
A__ : int = inputs['''input_ids''']
A__ : int = inputs.get("""decoder_input_ids""" , _lowerCamelCase )
del inputs["input_ids"]
inputs.pop("""decoder_input_ids""" , _lowerCamelCase )
A__ : List[Any] = model.get_input_embeddings()
if not self.is_encoder_decoder:
A__ : Optional[Any] = wte(_lowerCamelCase )
else:
A__ : List[str] = wte(_lowerCamelCase )
A__ : Optional[int] = wte(_lowerCamelCase )
with torch.no_grad():
model(**_lowerCamelCase )[0]
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
A__ : Optional[int] = input_dict['''input_ids''']
A__ : Tuple = input_ids.ne(1 ).to(_lowerCamelCase )
A__ : Optional[int] = MaMaaaForConditionalGeneration(_lowerCamelCase ).eval().to(_lowerCamelCase )
if torch_device == "cuda":
model.half()
model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase )
model.generate(num_beams=4 , do_sample=_lowerCamelCase , early_stopping=_lowerCamelCase , num_return_sequences=3 )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : List[str] = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(_lowerCamelCase )
A__ : Dict = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
A__ : Any = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
A__ : int = prepare_mam_aaa_inputs_dict(model.config , _lowerCamelCase , _lowerCamelCase )
with torch.no_grad():
A__ : str = model(**_lowerCamelCase )[0]
A__ : Tuple = torch.Size((1, 11, 1024) )
self.assertEqual(output.shape , _lowerCamelCase )
# change to expected output here
A__ : List[str] = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=_lowerCamelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Tuple = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(_lowerCamelCase )
# change to intended input
A__ : List[Any] = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
A__ : Optional[int] = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
A__ : Any = prepare_mam_aaa_inputs_dict(model.config , _lowerCamelCase , _lowerCamelCase )
with torch.no_grad():
A__ : int = model(**_lowerCamelCase )[0]
A__ : Tuple = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , _lowerCamelCase )
# change to expected output here
A__ : List[str] = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=_lowerCamelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : List[str] = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(_lowerCamelCase )
A__ : Optional[Any] = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
A__ : str = [
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'''
''' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'''
''' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.''',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
A__ : List[str] = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors="""pt""" )
A__ : str = model.generate(
input_ids=dct["""input_ids"""].to(_lowerCamelCase ) , attention_mask=dct["""attention_mask"""].to(_lowerCamelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
A__ : Dict = [
'''The NSA case highlights the total absence of intelligence debate''',
'''I think there are two levels of response from the French government.''',
'''When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'''
''' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'''
''' communications in France.''',
]
A__ : int = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
assert generated == expected_en
| 369
|
"""simple docstring"""
import numpy as np
class Cell:
    """
    Class cell represents a cell in the world, which has the properties:
    position: represented by a tuple of x and y coordinates, initially (0, 0).
    parent: the parent cell object visited before we arrived at this cell.
    g, h, f: parameters used by the heuristic function.
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
    def show(self):
        print(self.w)
    def get_neighbours(self, cell):
        """Return the in-bounds neighbours of cell (8-connectivity)."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            # Skip cells that were already expanded.
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            xa, ya = n.position
            xb, yb = goal.position
            # Heuristic: squared Euclidean distance to the goal.
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
            # Skip if a cheaper copy of this cell is already queued.
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(F'path from {start.position} to {goal.position}')
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
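# --- Sanity-check sketch for the A* search above ---
# On an empty 5x5 grid the returned path should run from (0, 0) to (4, 4); with
# unit step costs and the squared-Euclidean heuristic it follows the diagonal.
def _demo_astar():
    demo_world = Gridworld()
    demo_start, demo_goal = Cell(), Cell()
    demo_start.position, demo_goal.position = (0, 0), (4, 4)
    demo_path = astar(demo_world, demo_start, demo_goal)
    assert demo_path[0] == (0, 0) and demo_path[-1] == (4, 4)
    print(demo_path)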
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : List[str] = tempfile.mkdtemp()
# fmt: off
A__ : Dict = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
A__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
A__ : Union[str, Any] = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
        A__ : List[Any] = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def _UpperCamelCase ( self : Dict , **snake_case : Dict ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _UpperCamelCase ( self : Dict , **snake_case : List[Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A__ : int = [Image.fromarray(np.moveaxis(_lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : Dict = self.get_tokenizer()
A__ : List[str] = self.get_image_processor()
A__ : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
A__ : int = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCAmelCase )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Union[str, Any] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ : Tuple = self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0 )
A__ : Optional[int] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCAmelCase )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Optional[int] = self.get_image_processor()
A__ : Union[str, Any] = self.get_tokenizer()
A__ : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
A__ : List[Any] = self.prepare_image_inputs()
A__ : Optional[Any] = image_processor(_lowerCAmelCase , return_tensors="""np""" )
A__ : List[Any] = processor(images=_lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : List[str] = self.get_image_processor()
A__ : Tuple = self.get_tokenizer()
A__ : str = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
A__ : Dict = """lower newer"""
A__ : Dict = processor(text=_lowerCAmelCase )
A__ : int = tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Any = self.get_image_processor()
A__ : Any = self.get_tokenizer()
A__ : List[str] = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
A__ : Any = """lower newer"""
A__ : Optional[Any] = self.prepare_image_inputs()
A__ : Dict = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(_lowerCAmelCase ):
processor()
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Any = self.get_image_processor()
A__ : Optional[Any] = self.get_tokenizer()
A__ : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
A__ : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ : List[Any] = processor.batch_decode(_lowerCAmelCase )
A__ : Any = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : str = self.get_image_processor()
A__ : Union[str, Any] = self.get_tokenizer()
A__ : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
A__ : str = """lower newer"""
A__ : int = self.prepare_image_inputs()
A__ : int = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
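# --- Illustrative usage sketch (checkpoint names are placeholders) ---
# Outside the test harness, the processor is built from any compatible
# tokenizer / image-processor pair and used with a single call:
from transformers import BertTokenizer, VisionTextDualEncoderProcessor, ViTImageProcessor

def build_processor_sketch(tokenizer_name, image_processor_name):
    tokenizer = BertTokenizer.from_pretrained(tokenizer_name)
    image_processor = ViTImageProcessor.from_pretrained(image_processor_name)
    return VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)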
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple=False ) ->str:
A__ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
A__ : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Any]=False ) ->str:
for i in range(config.num_hidden_layers ):
if base_model:
A__ : Any = """"""
else:
A__ : Tuple = """deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : Any = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
A__ : Tuple = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A__ : List[Any] = in_proj_weight[
: config.hidden_size, :
]
A__ : str = in_proj_bias[: config.hidden_size]
A__ : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
A__ : Any = in_proj_bias[-config.hidden_size :]
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Union[str, Any] ) ->Any:
A__ : int = dct.pop(UpperCAmelCase__ )
A__ : Tuple = val
def _lowerCAmelCase ( ) ->List[Any]:
A__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : int = Image.open(requests.get(UpperCAmelCase__, stream=UpperCAmelCase__ ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any ) ->Tuple:
A__ : List[Any] = DeiTConfig()
# all deit models have fine-tuned heads
A__ : Tuple = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    A__ : str = 1000
A__ : List[str] = """huggingface/label-files"""
A__ : Dict = """imagenet-1k-id2label.json"""
A__ : List[str] = json.load(open(hf_hub_download(UpperCAmelCase__, UpperCAmelCase__, repo_type="""dataset""" ), """r""" ) )
A__ : Dict = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
A__ : Optional[int] = idalabel
A__ : Dict = {v: k for k, v in idalabel.items()}
A__ : List[str] = int(deit_name[-6:-4] )
A__ : str = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("""tiny""" ):
A__ : List[str] = 1_9_2
A__ : int = 7_6_8
A__ : List[Any] = 1_2
A__ : Dict = 3
elif deit_name[9:].startswith("""small""" ):
A__ : List[Any] = 3_8_4
A__ : List[str] = 1_5_3_6
A__ : Any = 1_2
A__ : Union[str, Any] = 6
if deit_name[9:].startswith("""base""" ):
pass
elif deit_name[4:].startswith("""large""" ):
A__ : int = 1_0_2_4
A__ : str = 4_0_9_6
A__ : Any = 2_4
A__ : int = 1_6
# load original model from timm
A__ : Dict = timm.create_model(UpperCAmelCase__, pretrained=UpperCAmelCase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A__ : Tuple = timm_model.state_dict()
A__ : str = create_rename_keys(UpperCAmelCase__, UpperCAmelCase__ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
read_in_q_k_v(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# load HuggingFace model
A__ : str = DeiTForImageClassificationWithTeacher(UpperCAmelCase__ ).eval()
model.load_state_dict(UpperCAmelCase__ )
# Check outputs on an image, prepared by DeiTImageProcessor
    A__ : int = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
A__ : Any = DeiTImageProcessor(size=UpperCAmelCase__, crop_size=config.image_size )
A__ : Union[str, Any] = image_processor(images=prepare_img(), return_tensors="""pt""" )
A__ : Optional[Any] = encoding["""pixel_values"""]
A__ : Union[str, Any] = model(UpperCAmelCase__ )
A__ : Union[str, Any] = timm_model(UpperCAmelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCAmelCase__, outputs.logits, atol=1e-3 )
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
print(f'Saving model {deit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCAmelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
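# --- Sketch of the fused-qkv split performed by read_in_q_k_v above ---
# timm stores attention as one (3 * hidden, hidden) in-projection; HF DeiT wants
# separate q/k/v. A plain-named version of the same slicing, the shape layout
# being the only assumption:
import torch

def split_fused_qkv(in_proj_weight: torch.Tensor, in_proj_bias: torch.Tensor, hidden_size: int):
    q = (in_proj_weight[:hidden_size, :], in_proj_bias[:hidden_size])
    k = (in_proj_weight[hidden_size : hidden_size * 2, :], in_proj_bias[hidden_size : hidden_size * 2])
    v = (in_proj_weight[-hidden_size:, :], in_proj_bias[-hidden_size:])
    return q, k, v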
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
A_ = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
A_ = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
A_ = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def _UpperCamelCase ( self : Dict , snake_case : Any , snake_case : int , snake_case : Tuple=4 , snake_case : Tuple=False ):
'''simple docstring'''
A__ : int = compute_bleu(
reference_corpus=lowercase_ , translation_corpus=lowercase_ , max_order=lowercase_ , smooth=lowercase_ )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Divide-and-conquer maximum subarray: returns (start, end, sum)."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start
def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
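# --- Alternative sketch: the same maximum-subarray sum in O(n) ---
# The divide-and-conquer routine above is O(n log n); Kadane's algorithm returns
# the best sum (without the index pair) in one pass over a non-empty array.
def kadane_sum(arr):
    best = current = float("-inf")
    for value in arr:
        current = max(value, current + value)
        best = max(best, current)
    return best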
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
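# --- Sketch: the lazy-import pattern this __init__ relies on ---
# `_LazyModule` defers the heavy submodule imports until an attribute is first
# touched; the TYPE_CHECKING branch gives static analyzers the eager view.
# A minimal illustration of the same idea (not the transformers implementation):
import importlib

class _LazyAttrProxy:
    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name):
        if self._module is None:
            # Import happens only on first attribute access.
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)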
"""simple docstring"""
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int):
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            error_message = (
                F'Expected a_coeffs to have {self.order + 1} elements '
                F'for {self.order}-order filter, got {len(a_coeffs)}'
            )
            raise ValueError(error_message)
        if len(b_coeffs) != self.order + 1:
            error_message = (
                F'Expected b_coeffs to have {self.order + 1} elements '
                F'for {self.order}-order filter, got {len(b_coeffs)}'
            )
            raise ValueError(error_message)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
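# --- Usage sketch (uses the descriptive names restored above) ---
# Driving the filter with a unit impulse; the coefficients are an arbitrary
# first-order low-pass chosen for illustration, not values from this file.
def _demo_iir():
    filt = IIRFilter(1)
    filt.set_coefficients([1.0, -0.9], [0.1, 0.0])
    impulse_response = [filt.process(1.0 if i == 0 else 0.0) for i in range(5)]
    print(impulse_response)  # geometric decay: 0.1, 0.09, 0.081, ...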
"""simple docstring"""
import os
import sys
import unittest
A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A_ = os.path.join(git_repo_path, '''src''', '''transformers''')
A_ = '''\n{0} = None\n'''
A_ = '''\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'''
A_ = '''\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'''
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : Union[str, Any] = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
self.assertIsNone(lowercase_ )
A__ : str = find_backend(""" if not is_tokenizers_available():""" )
self.assertEqual(lowercase_ , """tokenizers""" )
A__ : Dict = find_backend(""" if not is_tensorflow_text_available():""" )
self.assertEqual(lowercase_ , """tensorflow_text""" )
A__ : List[str] = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
self.assertEqual(lowercase_ , """sentencepiece_and_tokenizers""" )
A__ : List[str] = find_backend(
""" if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
self.assertEqual(lowercase_ , """sentencepiece_and_tensorflow_text""" )
A__ : List[str] = find_backend(
""" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
self.assertEqual(lowercase_ , """sentencepiece_and_tokenizers_and_vision""" )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : List[str] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""" , lowercase_ )
self.assertIn("""tensorflow_text""" , lowercase_ )
self.assertIn("""sentencepiece_and_tokenizers""" , lowercase_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertModel""" , objects["""tf"""] )
self.assertIn("""FlaxBertModel""" , objects["""flax"""] )
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] )
self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Tuple = create_dummy_object("""CONSTANT""" , """'torch'""" )
self.assertEqual(lowercase_ , """\nCONSTANT = None\n""" )
A__ : int = create_dummy_object("""function""" , """'torch'""" )
self.assertEqual(
lowercase_ , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
A__ : Tuple = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n"
A__ : Dict = create_dummy_object("""FakeClass""" , """'torch'""" )
self.assertEqual(lowercase_ , lowercase_ )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : str = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n"
A__ : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] , lowercase_ )
| 351
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , snake_case : Optional[Any] , snake_case : Tuple=13 , snake_case : Dict=7 , snake_case : Optional[int]=True , snake_case : Union[str, Any]=True , snake_case : Dict=True , snake_case : Any=True , snake_case : List[str]=99 , snake_case : str=64 , snake_case : Optional[int]=5 , snake_case : str=4 , snake_case : List[Any]=37 , snake_case : Optional[Any]="gelu" , snake_case : List[str]=0.1 , snake_case : str=0.1 , snake_case : Optional[int]=512 , snake_case : Dict=16 , snake_case : List[Any]=2 , snake_case : Optional[int]=0.02 , snake_case : Any=3 , snake_case : Union[str, Any]=4 , snake_case : Dict=None , ):
'''simple docstring'''
A__ : Tuple = parent
A__ : Union[str, Any] = batch_size
A__ : List[str] = seq_length
A__ : Optional[int] = is_training
A__ : Dict = use_input_mask
A__ : Any = use_token_type_ids
A__ : Optional[Any] = use_labels
A__ : List[str] = vocab_size
A__ : Optional[int] = hidden_size
A__ : Optional[Any] = num_hidden_layers
A__ : Any = num_attention_heads
A__ : List[Any] = intermediate_size
A__ : Optional[Any] = hidden_act
A__ : Optional[int] = hidden_dropout_prob
A__ : Tuple = attention_probs_dropout_prob
A__ : str = max_position_embeddings
A__ : List[str] = type_vocab_size
A__ : Union[str, Any] = type_sequence_label_size
A__ : List[Any] = initializer_range
A__ : Optional[int] = num_labels
A__ : Dict = num_choices
A__ : Dict = scope
A__ : List[Any] = vocab_size - 1
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : List[Any] = None
if self.use_input_mask:
A__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Union[str, Any] = None
if self.use_labels:
A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Tuple = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ , A__ , A__ , A__ : str = self.prepare_config_and_inputs()
A__ : Union[str, Any] = True
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self : Union[str, Any] , snake_case : Optional[int] , snake_case : List[str] , snake_case : int ):
'''simple docstring'''
A__ : Any = GPTNeoXModel(config=snake_case )
model.to(snake_case )
model.eval()
A__ : int = model(snake_case , attention_mask=snake_case )
A__ : Optional[int] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Union[str, Any] , snake_case : str , snake_case : Any , snake_case : Union[str, Any] ):
'''simple docstring'''
A__ : int = True
A__ : str = GPTNeoXModel(snake_case )
model.to(snake_case )
model.eval()
A__ : Tuple = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Dict , snake_case : List[Any] , snake_case : str , snake_case : Optional[Any] , snake_case : Any ):
'''simple docstring'''
A__ : Any = GPTNeoXForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Tuple = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : List[str] , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : Tuple ):
'''simple docstring'''
A__ : int = self.num_labels
A__ : int = GPTNeoXForQuestionAnswering(snake_case )
model.to(snake_case )
model.eval()
A__ : Optional[Any] = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : str , snake_case : Tuple , snake_case : int , snake_case : int , snake_case : Dict ):
'''simple docstring'''
A__ : List[Any] = self.num_labels
A__ : Tuple = GPTNeoXForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : List[str] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : Any , snake_case : Union[str, Any] , snake_case : int , snake_case : Tuple , snake_case : Any ):
'''simple docstring'''
A__ : Tuple = self.num_labels
A__ : Any = GPTNeoXForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : Dict = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : List[str] , snake_case : List[str] , snake_case : Tuple , snake_case : Any ):
'''simple docstring'''
A__ : Optional[int] = True
A__ : Any = GPTNeoXForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
# first forward pass
A__ : Tuple = model(snake_case , attention_mask=snake_case , use_cache=snake_case )
A__ : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A__ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
A__ : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
A__ : Tuple = model(snake_case , attention_mask=snake_case , output_hidden_states=snake_case )
A__ : List[Any] = output_from_no_past["""hidden_states"""][0]
A__ : List[str] = model(
snake_case , attention_mask=snake_case , past_key_values=snake_case , output_hidden_states=snake_case , )["""hidden_states"""][0]
# select random slice
A__ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : str = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ : Dict = config_and_inputs
A__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
snake_case_ = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (GPTNeoXForCausalLM,) if is_torch_available() else ()
snake_case_ = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : Any = GPTNeoXModelTester(self )
A__ : Any = ConfigTester(self , config_class=snake_case , hidden_size=64 , num_attention_heads=8 )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ , A__ , A__ , A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(snake_case , snake_case , snake_case )
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ , A__ , A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(snake_case , snake_case , snake_case )
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ , A__ , A__ , A__ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
A__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(snake_case , snake_case , snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ , A__ , A__ , A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(snake_case , snake_case , snake_case )
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*snake_case )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Optional[Any] ):
'''simple docstring'''
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
A__ : List[Any] = ids_tensor([1, 10] , config.vocab_size )
A__ : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A__ : Union[str, Any] = GPTNeoXModel(snake_case )
original_model.to(snake_case )
original_model.eval()
A__ : Optional[int] = original_model(snake_case ).last_hidden_state
A__ : List[str] = original_model(snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A__ : Optional[int] = {"""type""": scaling_type, """factor""": 10.0}
A__ : Optional[int] = GPTNeoXModel(snake_case )
scaled_model.to(snake_case )
scaled_model.eval()
A__ : List[str] = scaled_model(snake_case ).last_hidden_state
A__ : Tuple = scaled_model(snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Any = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
A__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(snake_case )
A__ : Optional[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(snake_case )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
A__ : Union[str, Any] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
A__ : Tuple = model.generate(**snake_case , do_sample=snake_case , max_new_tokens=20 )
A__ : Tuple = tokenizer.batch_decode(snake_case )[0]
self.assertEqual(snake_case , snake_case )
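# --- Illustrative sketch of the RoPE-scaling knob exercised above ---
# The parameterized test builds the model with {"type": <linear|dynamic>,
# "factor": 10.0}. In user code that corresponds to the config attribute below
# (checkpoint reused from the test; the factor is arbitrary):
from transformers import GPTNeoXConfig

def rope_scaled_config_sketch():
    config = GPTNeoXConfig.from_pretrained("EleutherAI/pythia-410m-deduped")
    config.rope_scaling = {"type": "linear", "factor": 10.0}
    return config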
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image (slice reconstructed so the kernel centre lines up)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
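# --- Worked micro-example ---
# With the cross-shaped structuring element, dilation grows a single foreground
# pixel into a plus sign (the centre plus its 4-neighbourhood):
def _demo_dilation():
    tiny = np.zeros((5, 5))
    tiny[2, 2] = 1
    cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    print(dilation(tiny, cross))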
"""simple docstring"""
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F'{solution() = }')
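# --- Worked check of the Euclid parameterisation used above ---
# Every primitive Pythagorean triple is (m^2 - n^2, 2mn, m^2 + n^2) for coprime
# m > n of opposite parity, so its perimeter is 2m(m + n); multiples are counted
# by stepping that perimeter. For m=2, n=1 this is the 3-4-5 triangle:
def _check_euclid(m=2, n=1):
    a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
    assert a * a + b * b == c * c  # 3^2 + 4^2 == 5^2
    assert a + b + c == 2 * m * (m + n)  # perimeter 12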
"""simple docstring"""
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""", [False, True] )
def _lowerCAmelCase ( UpperCAmelCase__ : int, UpperCAmelCase__ : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str] ) ->int:
A__ : str = tmp_path / 'cache'
A__ : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A__ : Optional[int] = SqlDatasetReader(
"""dataset""", """sqlite:///""" + sqlite_path, cache_dir=_A, keep_in_memory=_A ).read()
_check_sql_dataset(_A, _A )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""", [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
], )
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : int, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Optional[Any] ) ->Optional[int]:
A__ : Dict = tmp_path / 'cache'
A__ : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A__ : Tuple = features.copy() if features else default_expected_features
A__ : Optional[Any] = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
A__ : List[Any] = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, features=_A, cache_dir=_A ).read()
_check_sql_dataset(_A, _A )
def _lowerCAmelCase ( UpperCAmelCase__ : Dict ) ->Union[str, Any]:
with contextlib.closing(sqlitea.connect(_A ) ) as con:
A__ : Any = con.cursor()
cur.execute("""SELECT * FROM dataset""" )
for row in cur:
yield row
@require_sqlalchemy
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Tuple ) ->str:
A__ : Tuple = tmp_path / 'cache'
A__ : Union[str, Any] = os.path.join(_A, """tmp.sql""" )
A__ : Any = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=_A ).read()
SqlDatasetWriter(_A, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=1 ).write()
A__ : int = iter_sql_file(_A )
A__ : List[str] = iter_sql_file(_A )
for rowa, rowa in zip(_A, _A ):
assert rowa == rowa
@require_sqlalchemy
def _lowerCAmelCase ( UpperCAmelCase__ : str, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : List[Any] ) ->Any:
A__ : str = tmp_path / 'cache'
A__ : Any = os.path.join(_A, """tmp.sql""" )
A__ : int = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=_A ).read()
SqlDatasetWriter(_A, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=2 ).write()
A__ : Any = iter_sql_file(_A )
A__ : List[Any] = iter_sql_file(_A )
for rowa, rowa in zip(_A, _A ):
assert rowa == rowa
@require_sqlalchemy
def _lowerCAmelCase ( UpperCAmelCase__ : str, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Union[str, Any] ) ->Dict:
A__ : Optional[Any] = tmp_path / 'cache'
A__ : Optional[int] = os.path.join(_A, """tmp.sql""" )
A__ : List[Any] = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=_A ).read()
with pytest.raises(_A ):
SqlDatasetWriter(_A, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=0 ).write()
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found among env_keys, else default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
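# --- Usage sketch (environment variable names are made up for the example,
# and the helper names above are descriptive reconstructions) ---
def _demo_env_helpers():
    os.environ["DEMO_DEBUG"] = "yes"
    assert parse_flag_from_env("DEMO_DEBUG", default=False) is True
    assert get_int_from_env(["DEMO_WORKERS"], default=4) == 4  # unset -> default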
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class GPTBigCodeConfig(PretrainedConfig):
snake_case_ = 'gpt_bigcode'
snake_case_ = ['past_key_values']
snake_case_ = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
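# --- Usage sketch ---
# A deliberately tiny variant of the config above; sizes are arbitrary for
# illustration. `hidden_size` resolves to `n_embd` through the attribute_map.
def _tiny_config_sketch():
    tiny = GPTBigCodeConfig(vocab_size=1024, n_positions=256, n_embd=128, n_layer=2, n_head=4)
    assert tiny.hidden_size == 128
    return tiny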
"""simple docstring"""
import cva
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""")

    def __str__(self):
        return str(self.k)
    def detect(self, img_path: str):
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the k validated in the constructor
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
A_ = HarrisCorner(0.04, 3)
A_ , A_ = edge_detect.detect('''path_to_image''')
cva.imwrite('''detect.png''', color_img)
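# For intuition: the response computed above is R = det(M) - k * trace(M)**2 over the
# windowed structure tensor M = [[wxx, wxy], [wxy, wyy]]. A hand-checkable example:
# wxx, wyy, wxy = 2.0, 2.0, 0.1
# r = (wxx * wyy - wxy**2) - 0.04 * (wxx + wyy)**2  # = 3.35 > 0.5, flagged as a corner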
| 296
| 0
|
"""simple docstring"""
import math
def main() -> None:
    message = input("""Enter message: """ )
    key = int(input(f'Enter key [2-{len(message ) - 1}]: ' ) )
    mode = input("""Encryption/Decryption [e/d]: """ )
    if mode.lower().startswith("""e""" ):
        text = encrypt_message(key, message )
    elif mode.lower().startswith("""d""" ):
        text = decrypt_message(key, message )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f'Output:\n{text + "|"}' )
def encrypt_message(key: int, message: str ) -> str:
    cipher_text = [""""""] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def decrypt_message(key: int, message: str ) -> str:
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [""""""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
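# A worked example (the classic sample used for this cipher; verifiable by hand):
# encrypt_message(8, "Common sense is not so common.")
#   -> 'Cenoonommstmme oo snnio. s s c'
# decrypt_message(8, "Cenoonommstmme oo snnio. s s c")
#   -> 'Common sense is not so common.'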
| 355
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ObjectDetectionPipeline( Pipeline ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , """vision""" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["""threshold"""] = kwargs["""threshold"""]
        return {}, {}, postprocess_kwargs
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        return super().__call__(*args , **kwargs )
    def preprocess( self , image ):
        '''simple docstring'''
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors="""pt""" )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
        inputs["""target_size"""] = target_size
        return inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        target_size = model_inputs.pop("""target_size""" )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({"""target_size""": target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs["""bbox"""] = model_inputs["""bbox"""]
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.9 ):
        '''simple docstring'''
        target_size = model_outputs["""target_size"""]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height , width = target_size[0].tolist()
            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )
            scores , classes = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
            keys = ["""score""", """label""", """box"""]
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["""scores"""]
            labels = raw_annotation["""labels"""]
            boxes = raw_annotation["""boxes"""]
            raw_annotation["""scores"""] = scores.tolist()
            raw_annotation["""labels"""] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["""boxes"""] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["""score""", """label""", """box"""]
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
            ]
        return annotation
    def _get_bounding_box( self , box: "torch.Tensor" ):
        '''simple docstring'''
        if self.framework != "pt":
            raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
            """xmin""": xmin,
            """ymin""": ymin,
            """xmax""": xmax,
            """ymax""": ymax,
        }
        return bbox
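# Typical use through the high-level factory (sketch; downloads a default checkpoint):
# from transformers import pipeline
# detector = pipeline("object-detection")
# detector("path/to/image.jpg", threshold=0.9)
# # -> [{"score": 0.99, "label": "...", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]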
| 296
| 0
|
"""simple docstring"""
def base16_encode(data: bytes ) -> str:
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode(data: str ) -> bytes:
    if (len(data ) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("""0123456789ABCDEF""" ):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 1_6 ) for i in range(0, len(data ), 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
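# Worked example (verifiable by hand: 'H' is 0x48, 'e' is 0x65, and so on):
# base16_encode(b"Hello World!")             # -> '48656C6C6F20576F726C6421'
# base16_decode("48656C6C6F20576F726C6421")  # -> b'Hello World!'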
| 356
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/table-transformer-detection''': (
        '''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
    ),
}
class TableTransformerConfig( PretrainedConfig ):
    model_type = 'table-transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation , backbone , use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ):
        '''simple docstring'''
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        '''simple docstring'''
        return self.d_model
class TableTransformerOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ] )
    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1e-5
    @property
    def default_onnx_opset( self ):
        '''simple docstring'''
        return 12
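# Usage sketch for the config above (hypothetical values; relies on the standard
# PretrainedConfig save/load round-trip):
# config = TableTransformerConfig(num_queries=100, d_model=256)
# config.save_pretrained("./table-transformer-config")  # writes config.json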
| 296
| 0
|
"""simple docstring"""
def partition(m: int ) -> int:
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1, m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
        n = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
        n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
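# Worked example: partition(5) == 7, matching the seven integer partitions of 5:
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1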
| 357
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool( PipelineTool ):
    default_checkpoint = 'Salesforce/blip-image-captioning-base'
    description = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    name = 'image_captioner'
    model_class = AutoModelForVision2Seq
    inputs = ['image']
    outputs = ['text']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )
    def encode( self , image: "Image" ):
        '''simple docstring'''
        return self.pre_processor(images=image , return_tensors="""pt""" )
    def forward( self , inputs ):
        '''simple docstring'''
        return self.model.generate(**inputs )
    def decode( self , outputs ):
        '''simple docstring'''
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
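# Usage sketch (assumes the transformers tools runtime; the image path is hypothetical):
# from PIL import Image
# tool = ImageCaptioningTool()
# caption = tool(Image.open("photo.jpg"))  # -> e.g. "a dog sitting on the grass"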
| 296
| 0
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests( OnnxPipelineTesterMixin , unittest.TestCase ):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = 'ssube/stable-diffusion-x4-upscaler-onnx'
    def get_dummy_inputs( self , seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = torch.manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
    def test_pipeline_default_ddpm( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1
    def test_pipeline_pndm( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_dpm_multistep( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler_ancestral( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests( unittest.TestCase ):
    @property
    def gpu_provider( self ):
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self ):
        '''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ):
        '''simple docstring'''
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        init_image = init_image.resize((128, 128) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            """ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A fantasy landscape, trending on artstation"""
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def test_inference_k_lms( self ):
        '''simple docstring'''
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        init_image = init_image.resize((128, 128) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            """ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            """ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=lms_scheduler , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A fantasy landscape, trending on artstation"""
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
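# Running the pipeline outside the test harness looks roughly like this (sketch;
# requires onnxruntime and a local or Hub copy of the checkpoint):
# pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#     "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider")
# upscaled = pipe(prompt="a photo", image=low_res_image, num_inference_steps=3).images[0]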
| 358
|
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest( nn.Module ):
    def __init__( self ):
        '''simple docstring'''
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ):
        '''simple docstring'''
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class OffloadTester( unittest.TestCase ):
    def test_offload_state_dict( self ):
        '''simple docstring'''
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , model.state_dict() )
            index_file = os.path.join(tmp_dir , """index.json""" )
            self.assertTrue(os.path.isfile(index_file ) )
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir , F'{key}.dat' )
                self.assertTrue(os.path.isfile(weight_file ) )
            # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight( self ):
        '''simple docstring'''
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2 , 3 , dtype=dtype )
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight , """weight""" , tmp_dir , {} )
                weight_file = os.path.join(tmp_dir , """weight.dat""" )
                self.assertTrue(os.path.isfile(weight_file ) )
                self.assertDictEqual(index , {"""weight""": {"""shape""": [2, 3], """dtype""": str(dtype ).split(""".""" )[1]}} )
                new_weight = load_offloaded_weight(weight_file , index["""weight"""] )
                self.assertTrue(torch.equal(weight , new_weight ) )
    def test_offload_weights_loader( self ):
        '''simple docstring'''
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if """linear2""" not in k}
        disk_part = {k: v for k, v in state_dict.items() if """linear2""" in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        cpu_part = {k: v for k, v in state_dict.items() if """weight""" in k}
        disk_part = {k: v for k, v in state_dict.items() if """weight""" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , state_dict )
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
    def test_extract_submodules_state_dict( self ):
        '''simple docstring'''
        state_dict = {"""a.1""": 0, """a.10""": 1, """a.2""": 2}
        extracted = extract_submodules_state_dict(state_dict , ["""a.1""", """a.2"""] )
        self.assertDictEqual(extracted , {"""a.1""": 0, """a.2""": 2} )
        state_dict = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2}
        extracted = extract_submodules_state_dict(state_dict , ["""a.1""", """a.2"""] )
        self.assertDictEqual(extracted , {"""a.1.a""": 0, """a.2.a""": 2} )
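# The offloading round-trip under test, in one sketch (accelerate's documented flow):
# with TemporaryDirectory() as tmp_dir:
#     offload_state_dict(tmp_dir, model.state_dict())  # writes index.json + one .dat per tensor
#     loader = OffloadedWeightsLoader(state_dict={}, save_folder=tmp_dir)
#     weight = loader["linear1.weight"]  # loaded back from disk on access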
| 296
| 0
|
def manhattan_distance(point_a: list, point_b: list ) -> float:
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(a - b ) for a, b in zip(point_a, point_b ) ) )
def _validate_point(point: list[float] ) -> None:
    if point:
        if isinstance(point, list ):
            for item in point:
                if not isinstance(item, (int, float) ):
                    msg = (
                        """Expected a list of numbers as input, found """
                        f'{type(item ).__name__}'
                    )
                    raise TypeError(msg )
        else:
            msg = f'Expected a list of numbers as input, found {type(point ).__name__}'
            raise TypeError(msg )
    else:
        raise ValueError("""Missing an input""" )
def manhattan_distance_one_liner(point_a: list, point_b: list ) -> float:
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(x - y ) for x, y in zip(point_a, point_b ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
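# Worked example: the distance is the sum of per-coordinate absolute differences:
# manhattan_distance([1, 1], [9, 9])  # -> 16.0  (|1 - 9| + |1 - 9|)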
| 359
|
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        '''simple docstring'''
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = BioGptModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Optional[int] , snake_case : Dict , snake_case : Optional[int] , snake_case : List[str] , snake_case : str , snake_case : Optional[Any] , snake_case : List[str] , snake_case : List[Any] , snake_case : Tuple , snake_case : Optional[Any] , ):
'''simple docstring'''
A__ : List[str] = BioGptForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Any , snake_case : str , snake_case : Tuple , snake_case : int , snake_case : Optional[Any] , snake_case : Any , *snake_case : Dict ):
'''simple docstring'''
A__ : Union[str, Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
# create attention mask
A__ : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
A__ : Any = self.seq_length // 2
A__ : str = 0
# first forward pass
A__ , A__ : List[Any] = model(snake_case , attention_mask=snake_case ).to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : int = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
A__ : List[str] = ids_tensor((1,) , snake_case ).item() + 1
A__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
A__ : int = random_other_next_tokens
# append to next input_ids and attn_mask
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : List[Any] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=snake_case )] , dim=1 , )
# get two different outputs
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Optional[int] = model(snake_case , past_key_values=snake_case , attention_mask=snake_case )["""last_hidden_state"""]
# select random slice
A__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
A__ : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : str , snake_case : int , snake_case : Optional[Any] , *snake_case : str ):
'''simple docstring'''
A__ : Dict = BioGptModel(config=snake_case ).to(snake_case ).eval()
A__ : Tuple = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
# first forward pass
A__ : Dict = model(snake_case , attention_mask=snake_case , use_cache=snake_case )
A__ , A__ : List[Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
A__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : int = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : Optional[int] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A__ : Any = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , past_key_values=snake_case )[
"""last_hidden_state"""
]
# select random slice
A__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : Any , snake_case : Tuple , *snake_case : Union[str, Any] , snake_case : Union[str, Any]=False ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM(snake_case )
model.to(snake_case )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
A__ : Optional[Any] = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , *snake_case : Optional[int] ):
'''simple docstring'''
A__ : int = BioGptModel(snake_case )
A__ : Union[str, Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _UpperCamelCase ( self : Any , snake_case : Dict , snake_case : Tuple , snake_case : int , snake_case : Union[str, Any] , snake_case : Dict , *snake_case : Any ):
'''simple docstring'''
A__ : Union[str, Any] = self.num_labels
A__ : int = BioGptForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': BioGptModel,
            'text-classification': BioGptForSequenceClassification,
            'text-generation': BioGptForCausalLM,
            'token-classification': BioGptForTokenClassification,
            'zero-shot': BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = BioGptModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=37 )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ : str = type
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*snake_case )
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*snake_case , gradient_checkpointing=snake_case )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*snake_case )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*snake_case )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*snake_case )
@slow
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
A__ : Optional[int] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = """left"""
# Define PAD Token = EOS Token = 50256
A__ : Optional[int] = tokenizer.eos_token
A__ : Dict = model.config.eos_token_id
# use different length sentences to test batching
A__ : Union[str, Any] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
A__ : List[str] = tokenizer(snake_case , return_tensors="""pt""" , padding=snake_case )
A__ : str = inputs["""input_ids"""].to(snake_case )
A__ : Dict = model.generate(
input_ids=snake_case , attention_mask=inputs["""attention_mask"""].to(snake_case ) , )
A__ : Optional[int] = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Any = model.generate(input_ids=snake_case )
A__ : List[str] = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
A__ : str = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Dict = model.generate(input_ids=snake_case , max_length=model.config.max_length - num_paddings )
A__ : Optional[Any] = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
A__ : List[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case )
A__ : str = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case )
A__ : Optional[int] = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(snake_case , [non_padded_sentence, padded_sentence] )
@slow
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Optional[Any] = BioGptModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Optional[int] = 3
A__ : List[Any] = input_dict["""input_ids"""]
A__ : Dict = input_ids.ne(1 ).to(snake_case )
A__ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A__ : Union[str, Any] = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : int = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ , A__ : str = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Any = 3
A__ : List[Any] = """multi_label_classification"""
A__ : Dict = input_dict["""input_ids"""]
A__ : Tuple = input_ids.ne(1 ).to(snake_case )
A__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A__ : Tuple = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : List[str] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Optional[Any] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
A__ : str = torch.tensor([[2, 4805, 9, 656, 21]] )
A__ : Dict = model(snake_case )[0]
A__ : Tuple = 4_2384
A__ : str = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , snake_case )
A__ : str = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : Tuple = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
torch.manual_seed(0 )
A__ : Tuple = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(snake_case )
A__ : Optional[int] = model.generate(
**snake_case , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=snake_case , )
A__ : Optional[int] = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case )
A__ : List[str] = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(snake_case , snake_case )
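# Generation outside the test harness, in brief (sketch; weights are fetched from the Hub):
# tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
# model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
# ids = tokenizer("COVID-19 is", return_tensors="pt").input_ids
# print(tokenizer.decode(model.generate(ids, max_length=30)[0], skip_special_tokens=True))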
| 296
| 0
|
"""simple docstring"""
from collections import defaultdict
def dfs(start: int ) -> int:
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree() -> None:
    dfs(1 )
if __name__ == "__main__":
    n , m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
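# For this sample tree (the classic "Even Tree" instance) the printed answer is 2:
# cutting edges (1, 3) and (1, 6) leaves three components of even size (4, 2 and 4).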
| 360
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model )
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        '''simple docstring'''
        if self.remove_space:
            outputs = """ """.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("""NFKD""" , outputs )
            outputs = """""".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text: str ):
        '''simple docstring'''
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , """""" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def _decode( self , token_ids: List[int] , skip_special_tokens: bool = False , clean_up_tokenization_spaces: bool = None , spaces_between_special_tokens: bool = True , **kwargs , ):
        '''simple docstring'''
        self._decode_use_source_tokenizer = kwargs.pop("""use_source_tokenizer""" , False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = """""".join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
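# Usage sketch (downloads the sentencepiece vocab from the Hub; token output is illustrative):
# tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
# tok.tokenize("Hello world")  # -> e.g. ['▁Hello', '▁world']
# tok("Hello world")["input_ids"]  # ends with the <sep>/<cls> ids, per build_inputs above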
| 296
| 0
|
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None ) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size = len(target ) + 1
    table = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    new_combinations = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
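# Expected output of the first call above, for reference (two decompositions;
# ordering follows the table construction):
# [['jwa', 'j', 'a', 'lapa'], ['j', 'w', 'a', 'j', 'a', 'lapa']]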
| 361
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url: str ):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_0_2_4
        config.intermediate_size = 4_0_9_6
        config.num_hidden_layers = 2_4
        config.num_attention_heads = 1_6
        config.backbone_out_indices = [5, 1_1, 1_7, 2_3]
        config.neck_hidden_sizes = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4]
        expected_shape = (1, 3_8_4, 3_8_4)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 1_5_0
        repo_id = """huggingface/label-files"""
        filename = """ade20k-id2label.json"""
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="""dataset""" ) ), """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 1_5_0, 4_8_0, 4_8_0]
    return config, expected_shape
def remove_ignore_keys_(state_dict ):
    ignore_keys = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k, None )
def rename_key(name: str ) -> str:
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("""pretrained.model""", """dpt.encoder""" )
    if "pretrained.model" in name:
        name = name.replace("""pretrained.model""", """dpt.embeddings""" )
    if "patch_embed" in name:
        name = name.replace("""patch_embed""", """patch_embeddings""" )
    if "pos_embed" in name:
        name = name.replace("""pos_embed""", """position_embeddings""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""", """attention.output.dense""" )
    if "proj" in name and "project" not in name:
        name = name.replace("""proj""", """projection""" )
    if "blocks" in name:
        name = name.replace("""blocks""", """layer""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""", """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""", """output.dense""" )
    if "norm1" in name:
        name = name.replace("""norm1""", """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""", """layernorm_after""" )
    if "scratch.output_conv" in name:
        name = name.replace("""scratch.output_conv""", """head""" )
    if "scratch" in name:
        name = name.replace("""scratch""", """neck""" )
    if "layer1_rn" in name:
        name = name.replace("""layer1_rn""", """convs.0""" )
    if "layer2_rn" in name:
        name = name.replace("""layer2_rn""", """convs.1""" )
    if "layer3_rn" in name:
        name = name.replace("""layer3_rn""", """convs.2""" )
    if "layer4_rn" in name:
        name = name.replace("""layer4_rn""", """convs.3""" )
    if "refinenet" in name:
        layer_idx = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx-4 )}' )
    if "out_conv" in name:
        name = name.replace("""out_conv""", """projection""" )
    if "resConfUnit1" in name:
        name = name.replace("""resConfUnit1""", """residual_layer1""" )
    if "resConfUnit2" in name:
        name = name.replace("""resConfUnit2""", """residual_layer2""" )
    if "conv1" in name:
        name = name.replace("""conv1""", """convolution1""" )
    if "conv2" in name:
        name = name.replace("""conv2""", """convolution2""" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess1.0.project.0""", """neck.reassemble_stage.readout_projects.0.0""" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess2.0.project.0""", """neck.reassemble_stage.readout_projects.1.0""" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess3.0.project.0""", """neck.reassemble_stage.readout_projects.2.0""" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess4.0.project.0""", """neck.reassemble_stage.readout_projects.3.0""" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("""pretrained.act_postprocess1.3""", """neck.reassemble_stage.layers.0.projection""" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("""pretrained.act_postprocess1.4""", """neck.reassemble_stage.layers.0.resize""" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("""pretrained.act_postprocess2.3""", """neck.reassemble_stage.layers.1.projection""" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("""pretrained.act_postprocess2.4""", """neck.reassemble_stage.layers.1.resize""" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("""pretrained.act_postprocess3.3""", """neck.reassemble_stage.layers.2.projection""" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("""pretrained.act_postprocess4.3""", """neck.reassemble_stage.layers.3.projection""" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("""pretrained.act_postprocess4.4""", """neck.reassemble_stage.layers.3.resize""" )
    if "pretrained" in name:
        name = name.replace("""pretrained""", """dpt""" )
    if "bn" in name:
        name = name.replace("""bn""", """batch_norm""" )
    if "head" in name:
        name = name.replace("""head""", """head.head""" )
    if "encoder.norm" in name:
        name = name.replace("""encoder.norm""", """layernorm""" )
    if "auxlayer" in name:
        name = name.replace("""auxlayer""", """auxiliary_head.head""" )
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
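# A hedged usage note (the script filename and output path below are placeholders,
# not taken from the original): with the default checkpoint URL above, the
# converter would typically be invoked along these lines, with --push_to_hub optional:
#
#   python convert_dpt_checkpoint.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large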
| 296
| 0
|
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    """Recursively compute base**exponent for a non-negative integer exponent."""
    return base * power(base, exponent - 1) if exponent else 1
if __name__ == "__main__":
print('''Raise base to the power of exponent using recursion...''')
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
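# A possible refinement (not part of the original): the linear recursion above
# performs `exponent` multiplications; exponentiation by squaring needs only
# O(log(exponent)) of them. A minimal iterative sketch:
def fast_power(base: float, exponent: int) -> float:
    """Exponentiation by squaring for non-negative integer exponents."""
    result = 1.0
    while exponent:
        if exponent & 1:  # lowest bit set -> multiply the current square in
            result *= base
        base *= base
        exponent >>= 1
    return result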
| 362
|
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrites the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
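# Illustration only (a hypothetical snippet, not taken from this repo): the checker
# above scans for comments of the following shape and verifies that the body below
# them still matches the referenced source, applying any `X->Y` renames before
# comparing. `make fix-copies` (or `--fix_and_overwrite`) rewrites the body when it drifts.
#
#   # Copied from diffusers.models.some_module.SomeBlock with SomeBlock->MyBlock
#   class MyBlock(nn.Module):
#       ...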
| 296
| 0
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
@slow
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : str = {"""input_ids""": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""facebook/m2m100_418M""" , revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
self.assertEqual(self.tokenizer.get_lang_id("""ar""" ) , 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id("""en""" ) , 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id("""ro""" ) , 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id("""mr""" ) , 12_8063 )
    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
| 363
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
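# For context: with the `_LazyModule` indirection above, a statement such as
#
#   from transformers.models.llama import LlamaConfig
#
# only imports `configuration_llama` at attribute-access time, so the optional
# backends (sentencepiece, tokenizers, torch) are touched lazily rather than at
# package import.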
| 296
| 0
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
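# The launcher imports the training script as a module and hands its `_mp_fn` to
# `xmp.spawn`, so any launched script must expose that entry point. A minimal
# sketch of a compatible training script (the body is an assumption, not from
# this repo):
def _mp_fn(index):
    # `index` is the local ordinal of the spawned TPU process; real scripts
    # usually re-parse sys.argv here, which the launcher has already patched.
    print(f"hello from TPU process {index}")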
| 364
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in `qs` match consecutive strings in the tuple `ks`."""
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
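# A minimal usage sketch (toy parameter tree; the shapes and key names are
# assumptions, not from the original): every flattened key is matched against the
# rules above and annotated with a PartitionSpec, e.g. sharding the feed-forward
# kernel over the "mp" axis.
#
#   import numpy as np
#   params = {"transformer": {"h": {"0": {"mlp": {"c_fc": {"kernel": np.ones((4, 8))}}}}}}
#   specs = set_partitions(params)
#   # flatten_dict(specs)[("transformer", "h", "0", "mlp", "c_fc", "kernel")] == P(None, "mp")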
| 296
| 0
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
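    # A small follow-up sketch (not in the original): thresholding the predicted
    # probability at 0.5 turns `predict_prob` into a class prediction, giving a
    # quick training-set accuracy check.
    predictions = (predict_prob(x) >= 0.5).astype(int)
    print("training accuracy: ", (predictions == y).mean())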
| 365
|
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 296
| 0
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
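# A hedged usage note (the prompt and paths below are placeholders, not from the
# original): the script keeps querying the LAION index with a growing `num_images`
# budget until enough candidates come back, then streams them into `class_data_dir`.
#
#   python retrieve.py \
#       --class_prompt "photo of a dog" \
#       --class_data_dir ./real_regularization \
#       --num_class_images 200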
| 366
|
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 296
| 0
|
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 367
|
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 296
| 0
|
def dfs(u, graph, visited_edge, path=None):
    """Depth-first search that walks each edge exactly once and records the vertices."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Return 1 for an Euler circuit, 2 for an Euler path, 3 for neither."""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
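# A small usage sketch (not in the original): `check_circuit_or_path` can also be
# used directly. For the triangle graph every vertex has even degree, so status
# code 1 (Euler cycle) comes back, and `dfs` then yields the closed walk.
#
#   triangle = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
#   status, _ = check_circuit_or_path(triangle, max_node=4)  # status == 1
#   visited = [[False] * 5 for _ in range(5)]
#   print(dfs(1, triangle, visited))                         # [1, 2, 3, 1]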
| 368
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4000, return_attention_mask=False, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_var_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
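# Worked example: 4150 = 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0, so
# digits_fifth_powers_sum(4150) == 4150 and 4150 is one of the numbers summed
# by solution().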
"""simple docstring"""
import numpy as np
class Cell:
    """A single grid cell with the A* bookkeeping values (g, h, f, parent)."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the valid neighbouring cells of `cell` (8-connectivity)."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
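# A note on the heuristic used above: n.h is the *squared* Euclidean distance
# to the goal, e.g. for a neighbour at (1, 1) with goal (4, 4):
#   h = (4 - 1) ** 2 + (4 - 1) ** 2 = 18
# Squaring can overestimate the true remaining cost, so the heuristic is not
# admissible and the returned path is not guaranteed to be the shortest one.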
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(self, image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, **kwargs):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
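# Example invocation (the output path is illustrative; the script name assumes
# the usual transformers conversion-script naming):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224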
"""simple docstring"""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[0] = True
CHAINS[57] = False


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
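# Worked chain: 44 -> 4**2 + 4**2 = 32 -> 3**2 + 2**2 = 13 -> 1**2 + 3**2 = 10
# -> 1**2 + 0**2 = 1, so chain(44) is True (it arrives at 1, not 89) and 44 is
# not counted by solution(), which counts the chains that end at 89.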
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find the maximum-sum contiguous subarray of arr[low..high] by divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
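# A quick sanity check of max_subarray on the classic example (assuming the
# corrected names above):
#
#   >>> max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8)
#   (3, 6, 6)
#
# i.e. the best contiguous subarray is arr[3..6] == [4, -1, 2, 1] with sum 6;
# the recursion runs in O(n log n), which plot_runtimes() above can visualize.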
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
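# A minimal usage sketch (hypothetical image path; assumes a vision-enabled
# install and that the tool is invoked via __call__ as usual for transformers
# tools):
#
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   print(tool(Image.open("photo.jpg")))  # e.g. "a cat sitting on a couch"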
"""simple docstring"""
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int):
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)

        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
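# A minimal usage sketch. The coefficients below are illustrative placeholders,
# not a designed filter; real coefficients would come from a filter-design
# routine (e.g. biquad formulas).
if __name__ == "__main__":
    filt = IIRFilter(2)
    filt.set_coefficients([1.0, 0.0, 0.0], [0.5, 0.0, 0.0])  # y[n] = 0.5 * x[n]
    print([filt.process(x) for x in (1.0, 0.0, 0.0, 1.0)])  # [0.5, 0.0, 0.0, 0.5]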
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
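# Worked example (illustrative values): with mobility = 0.01 m^2/(V.s) and
# electron_conc = 1e20 m^-3, the missing quantity is the conductivity:
#   sigma = mobility * electron_conc * ELECTRON_CHARGE
#         = 0.01 * 1e20 * 1.6021e-19 ~ 0.16 S/m
# which is what electric_conductivity(0, 1e20, 0.01) returns.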
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return GPTNeoXConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id)
    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_pythia_410m_sample(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F'{solution() = }')
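# Worked example of Euclid's formula as used above: euclid_m = 2, euclid_n = 1
# are coprime and of opposite parity, giving primitive perimeter
# 2 * 2 * (2 + 1) = 12, i.e. the (3, 4, 5) triangle; every multiple of 12 up
# to the limit then gets its frequency bumped, and solution() counts the
# perimeter values hit exactly once.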
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
print(F'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
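# Example results (assuming the corrected names above):
#
#   >>> check_anagrams("Silent", "Listen")
#   True
#   >>> check_anagrams("This is a string", "Is this a string")
#   True
#   >>> check_anagrams("There", "Their")
#   False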
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
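# A minimal usage sketch (the environment-variable names are made up for
# illustration):
#
#   os.environ["MY_APP_DEBUG"] = "yes"
#   parse_flag_from_env("MY_APP_DEBUG")                  # -> True
#   get_int_from_env(["MY_APP_WORKERS", "WORKERS"], 4)   # -> 4 if neither is set
#   parse_choice_from_env("MY_APP_MODE")                 # -> "no" (the default)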