| code (string, lengths 82–54.1k) | code_codestyle (int64, 0–699) | style_context (string, lengths 111–35.6k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class AudioClassification( TaskTemplate ):
    task: str = field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''audio''': Audio()} )
    label_schema: ClassVar[Features] = Features({'''labels''': ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features( self , features ):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(F'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['''labels'''] = features[self.label_column]
        task_template.__dict__['''label_schema'''] = label_schema
        return task_template
    @property
    def column_mapping( self ) -> Dict[str, str]:
        """simple docstring"""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 0 |
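The row above is an identifier-obfuscated copy of the `AudioClassification` task template from 🤗 Datasets. A minimal usage sketch, assuming a `datasets` version that still ships the (since deprecated) `tasks` module; the column names and label set are illustrative:

```python
from datasets import Audio, ClassLabel, Features
from datasets.tasks import AudioClassification

features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
template = AudioClassification(audio_column="audio", label_column="labels")
# align_with_features deep-copies the template and fills in the dataset's real ClassLabel
aligned = template.align_with_features(features)
print(aligned.label_schema["labels"].names)  # ['cat', 'dog']
```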
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = '''blenderbot-small'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self ,vocab_size=5_0265 ,max_position_embeddings=512 ,encoder_layers=8 ,encoder_ffn_dim=2048 ,encoder_attention_heads=16 ,decoder_layers=8 ,decoder_ffn_dim=2048 ,decoder_attention_heads=16 ,encoder_layerdrop=0.0 ,decoder_layerdrop=0.0 ,use_cache=True ,is_encoder_decoder=True ,activation_function="gelu" ,d_model=512 ,dropout=0.1 ,attention_dropout=0.0 ,activation_dropout=0.0 ,init_std=0.02 ,decoder_start_token_id=1 ,scale_embedding=False ,pad_token_id=0 ,bos_token_id=1 ,eos_token_id=2 ,forced_eos_token_id=2 ,**kwargs ,) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,is_encoder_decoder=is_encoder_decoder ,decoder_start_token_id=decoder_start_token_id ,forced_eos_token_id=forced_eos_token_id ,**kwargs ,)
class BlenderbotSmallOnnxConfig( OnnxSeq2SeqConfigWithPast ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs ,direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[F"""past_key_values.{i}.key"""] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[F"""past_key_values.{i}.value"""] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs
    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast ,self ).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[F"""present.{i}.key"""] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[F"""present.{i}.value"""] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm( self ,tokenizer: PreTrainedTokenizer ,batch_size: int = -1 ,seq_length: int = -1 ,is_pair: bool = False ,framework: Optional[TensorType] = None ,) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer ,batch_size ,seq_length ,is_pair ,framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer ,batch_size ,decoder_seq_length ,is_pair ,framework )
        decoder_inputs = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs ,**decoder_inputs )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            batch , encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['decoder_attention_mask'] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(batch ,decoder_past_length )] ,dim=1 )
            common_inputs['past_key_values'] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers , num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers ,num_decoder_layers )
            max_num_layers = max(num_encoder_layers ,num_decoder_layers ) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape ),
                        torch.zeros(decoder_shape ),
                        torch.zeros(encoder_shape ),
                        torch.zeros(encoder_shape ),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers ,max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self ,tokenizer: PreTrainedTokenizer ,batch_size: int = -1 ,seq_length: int = -1 ,is_pair: bool = False ,framework: Optional[TensorType] = None ,) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer ,batch_size ,seq_length ,is_pair ,framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            batch , seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers , _ = self.num_layers
            num_encoder_attention_heads , _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(batch ,past_key_values_length ,dtype=mask_dtype )] ,dim=1 )
            common_inputs['past_key_values'] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self ,tokenizer: PreTrainedTokenizer ,batch_size: int = -1 ,seq_length: int = -1 ,is_pair: bool = False ,framework: Optional[TensorType] = None ,) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input ,return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs( self ,tokenizer: PreTrainedTokenizer ,batch_size: int = -1 ,seq_length: int = -1 ,is_pair: bool = False ,framework: Optional[TensorType] = None ,) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        return common_inputs
    def _flatten_past_key_values_( self ,flattened_output ,name ,idx ,t ):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output ,name ,idx ,t )
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast ,self )._flatten_past_key_values_(
                flattened_output ,name ,idx ,t ) | 67 | 0 |
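The ONNX config above mostly builds zero-filled `past_key_values` tensors. A self-contained sketch of that shape logic (the helper name is hypothetical; the shapes follow the `(batch, num_heads, seq_len, hidden_size // num_heads)` convention used in the config):

```python
import torch

def dummy_past_key_values(batch, num_layers, num_heads, seq_len, hidden_size):
    # One (key, value) pair of zero tensors per layer, as in the config above.
    shape = (batch, num_heads, seq_len, hidden_size // num_heads)
    return [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]

past = dummy_past_key_values(batch=2, num_layers=8, num_heads=16, seq_len=10, hidden_size=512)
print(past[0][0].shape)  # torch.Size([2, 16, 10, 32])
```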
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ) -> None:
    """simple docstring"""
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.66_46_94
        config.cell_selection_preference = 0.20_79_51
        config.huber_loss_delta = 0.12_11_94
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0_35_25_13
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.45_19
        config.cell_selection_preference = 0.90_34_21
        config.huber_loss_delta = 2_22.0_88
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.76_31_41
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(f'''Task {task} not supported.''' )
    print(f'''Building PyTorch model from configuration: {config}''' )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(f'''Save tokenizer files to {pytorch_dump_path}''' )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=5_12 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 1 |
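A compact sketch of the task-to-head dispatch the conversion script above implements. The class names are real `transformers` exports; the helper itself is hypothetical:

```python
from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
)

TASK_TO_MODEL = {
    "SQA": TapasForQuestionAnswering,
    "WTQ": TapasForQuestionAnswering,
    "WIKISQL_SUPERVISED": TapasForQuestionAnswering,
    "TABFACT": TapasForSequenceClassification,
    "MLM": TapasForMaskedLM,
    "INTERMEDIATE_PRETRAINING": TapasModel,
}

def model_for_task(task: str, config: TapasConfig):
    if task not in TASK_TO_MODEL:
        raise ValueError(f"Task {task} not supported.")
    return TASK_TO_MODEL[task](config=config)
```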
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
"""simple docstring"""
    @property
    def dummy_uncond_unet( self ):
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
        return model
    def test_inference( self ):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet ,scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=2 ,generator=generator ,output_type='numpy' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pipe(num_inference_steps=2 ,generator=generator ,output_type='numpy' ,return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
    def test_karras_ve_pipeline( self ):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNet2DModel.from_pretrained(model_id )
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model ,scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=20 ,generator=generator ,output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 67 | 0 |
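Usage sketch for the pipeline exercised by the tests above; the checkpoint id comes from the slow test, and the step count is illustrative:

```python
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
image = pipe(num_inference_steps=20, generator=torch.manual_seed(0), output_type="numpy").images[0]
print(image.shape)  # (256, 256, 3)
```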
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
    """simple docstring"""
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=False ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=5_12 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,num_choices=4 ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self : Any ) -> Union[str, Any]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Dict:
_A = DistilBertModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_A = model(__lowerCAmelCase , __lowerCAmelCase )
_A = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> Any:
_A = DistilBertForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_A = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ) -> List[Any]:
_A = DistilBertForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_A = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any ) -> Dict:
_A = self.num_labels
_A = DistilBertForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_A = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[str] ) -> Tuple:
_A = self.num_labels
_A = DistilBertForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_A = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ) -> Union[str, Any]:
_A = self.num_choices
_A = DistilBertForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ : int = True
a__ : Union[str, Any] = True
a__ : Optional[Any] = True
a__ : Dict = True
    def setUp( self ):
        self.model_tester = DistilBertModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=DistilBertConfig ,dim=37 )
def snake_case_ ( self : List[str] ) -> str:
self.config_tester.run_common_tests()
def snake_case_ ( self : Any ) -> str:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__lowerCAmelCase )
def snake_case_ ( self : Optional[Any] ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__lowerCAmelCase )
def snake_case_ ( self : Dict ) -> int:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__lowerCAmelCase )
def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__lowerCAmelCase )
def snake_case_ ( self : Dict ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__lowerCAmelCase )
def snake_case_ ( self : Union[str, Any] ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__lowerCAmelCase )
@slow
def snake_case_ ( self : Optional[int] ) -> Dict:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = DistilBertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@slow
@require_torch_gpu
def snake_case_ ( self : Any ) -> str:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_A = True
_A = model_class(config=__lowerCAmelCase )
_A = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
_A = torch.jit.trace(
__lowerCAmelCase , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''traced_model.pt''' ) )
_A = torch.jit.load(os.path.join(__lowerCAmelCase , '''traced_model.pt''' ) , map_location=__lowerCAmelCase )
loaded(inputs_dict['''input_ids'''].to(__lowerCAmelCase ) , inputs_dict['''attention_mask'''].to(__lowerCAmelCase ) )
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
@slow
def snake_case_ ( self : Optional[Any] ) -> int:
_A = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
_A = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
_A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_A = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]
_A = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , __lowerCAmelCase )
_A = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1E-4 ) )
| 2 |
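The tester above builds its batches from two small helpers in the transformers test suite. A sketch of what they do; the floor-at-one mask tweak is an assumption for robustness, not a claim about the library's exact code:

```python
import torch

def ids_tensor(shape, vocab_size):
    # Uniform random token ids in [0, vocab_size).
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)

def random_attention_mask(shape):
    mask = ids_tensor(shape, vocab_size=2)
    mask[:, -1] = 1  # make sure at least one token is attended to per row
    return mask
```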
def naive_pattern_search( s: str , pattern: str ) -> list:
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position


if __name__ == "__main__":
    assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
    print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC""")) | 67 | 0 |
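An equivalent formulation with `str.startswith`, handy as a cross-check for the naive scan above:

```python
def naive_pattern_search_builtin(s: str, pattern: str) -> list:
    return [i for i in range(len(s) - len(pattern) + 1) if s.startswith(pattern, i)]

assert naive_pattern_search_builtin("ABAAABCDBBABCDDEBCABC", "ABC") == [4, 10, 18]
```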
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
def __init__( self )-> Tuple:
'''simple docstring'''
self.test()
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
UpperCamelCase = 0
UpperCamelCase = False
while not completed:
if counter == 1:
self.reset()
UpperCamelCase = self.advance()
if not self.does_advance(A_ ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.update(A_ )
counter += 1
if counter > 10000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , A_ )-> Dict:
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , A_ )-> Optional[int]:
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , A_=False )-> Optional[int]:
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class PhrasalConstraint(Constraint):
def __init__( self , A_ )-> Dict:
'''simple docstring'''
super(Constraint , self ).__init__()
if not isinstance(A_ , A_ ) or len(A_ ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
UpperCamelCase = token_ids
UpperCamelCase = len(self.token_ids )
UpperCamelCase = -1 # the index of the currently fulfilled step
UpperCamelCase = False
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase_ ( self , A_ )-> Optional[int]:
'''simple docstring'''
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase_ ( self , A_ )-> Tuple:
'''simple docstring'''
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
if self.does_advance(A_ ):
self.fulfilled_idx += 1
UpperCamelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
UpperCamelCase = True
UpperCamelCase = completed
else:
# failed to make progress.
UpperCamelCase = True
self.reset()
return stepped, completed, reset
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = False
UpperCamelCase = 0
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCAmelCase_ ( self , A_=False )-> str:
'''simple docstring'''
UpperCamelCase = PhrasalConstraint(self.token_ids )
if stateful:
UpperCamelCase = self.seqlen
UpperCamelCase = self.fulfilled_idx
UpperCamelCase = self.completed
return new_constraint
class DisjunctiveTrie:
def __init__( self , A_ , A_=True )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = max([len(A_ ) for one in nested_token_ids] )
UpperCamelCase = {}
for token_ids in nested_token_ids:
UpperCamelCase = root
for tidx, token_id in enumerate(A_ ):
if token_id not in level:
UpperCamelCase = {}
UpperCamelCase = level[token_id]
if no_subsets and self.has_subsets(A_ , A_ ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F''' {nested_token_ids}.''' )
UpperCamelCase = root
def UpperCAmelCase_ ( self , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.trie
for current_token in current_seq:
UpperCamelCase = start[current_token]
UpperCamelCase = list(start.keys() )
return next_tokens
def UpperCAmelCase_ ( self , A_ )-> List[str]:
'''simple docstring'''
UpperCamelCase = self.next_tokens(A_ )
return len(A_ ) == 0
def UpperCAmelCase_ ( self , A_ )-> str:
'''simple docstring'''
UpperCamelCase = list(root.values() )
if len(A_ ) == 0:
return 1
else:
return sum([self.count_leaves(A_ ) for nn in next_nodes] )
def UpperCAmelCase_ ( self , A_ , A_ )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = self.count_leaves(A_ )
return len(A_ ) != leaf_count
class DisjunctiveConstraint(Constraint):
def __init__( self , A_ )-> Optional[int]:
'''simple docstring'''
super(Constraint , self ).__init__()
if not isinstance(A_ , A_ ) or len(A_ ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(A_ , A_ ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
UpperCamelCase = DisjunctiveTrie(A_ )
UpperCamelCase = nested_token_ids
UpperCamelCase = self.trie.max_height
UpperCamelCase = []
UpperCamelCase = False
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase = self.trie.next_tokens(self.current_seq )
if len(A_ ) == 0:
return None
else:
return token_list
def UpperCAmelCase_ ( self , A_ )-> Any:
'''simple docstring'''
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCAmelCase_ ( self , A_ )-> Any:
'''simple docstring'''
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' )
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
if self.does_advance(A_ ):
self.current_seq.append(A_ )
UpperCamelCase = True
else:
UpperCamelCase = True
self.reset()
UpperCamelCase = self.trie.reached_leaf(self.current_seq )
UpperCamelCase = completed
return stepped, completed, reset
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = False
UpperCamelCase = []
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCAmelCase_ ( self , A_=False )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
UpperCamelCase = self.seqlen
UpperCamelCase = self.current_seq
UpperCamelCase = self.completed
return new_constraint
class ConstraintListState:
def __init__( self , A_ )-> int:
'''simple docstring'''
UpperCamelCase = constraints
# max # of steps required to fulfill a given constraint
UpperCamelCase = max([c.seqlen for c in constraints] )
UpperCamelCase = len(A_ )
UpperCamelCase = False
self.init_state()
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
UpperCamelCase = []
UpperCamelCase = None
UpperCamelCase = [constraint.copy(stateful=A_ ) for constraint in self.constraints]
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
UpperCamelCase = constraint.advance()
if isinstance(A_ , A_ ):
token_list.append(A_ )
elif isinstance(A_ , A_ ):
token_list.extend(A_ )
else:
UpperCamelCase = self.inprogress_constraint.advance()
if isinstance(A_ , A_ ):
token_list.append(A_ )
elif isinstance(A_ , A_ ):
token_list.extend(A_ )
if len(A_ ) == 0:
return None
else:
return token_list
def UpperCAmelCase_ ( self , A_ )-> Tuple:
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
UpperCamelCase , UpperCamelCase = self.add(A_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCAmelCase_ ( self , A_ )-> int:
'''simple docstring'''
if not isinstance(A_ , A_ ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
UpperCamelCase , UpperCamelCase = False, False
if self.completed:
UpperCamelCase = True
UpperCamelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.inprogress_constraint.update(A_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A_ ) )
UpperCamelCase = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
UpperCamelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
UpperCamelCase = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(A_ ):
UpperCamelCase , UpperCamelCase , UpperCamelCase = pending_constraint.update(A_ )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(A_ )
UpperCamelCase = None
if not complete and stepped:
UpperCamelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
UpperCamelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
UpperCamelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def UpperCAmelCase_ ( self , A_=True )-> Dict:
'''simple docstring'''
UpperCamelCase = ConstraintListState(self.constraints ) # we actually never though self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
UpperCamelCase = [
constraint.copy(stateful=A_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
UpperCamelCase = self.inprogress_constraint.copy(stateful=A_ )
UpperCamelCase = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 3 |
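Usage sketch for the constraint machinery above, via the class transformers actually exports; the token ids are illustrative:

```python
from transformers import PhrasalConstraint

constraint = PhrasalConstraint(token_ids=[5, 9, 2])
for token in [5, 9, 2]:
    stepped, completed, reset = constraint.update(token)
print(completed)  # True: the whole phrase has been matched
```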
from typing import Any

import numpy as np


def is_hermitian( matrix: np.ndarray ) -> bool:
    return np.array_equal(matrix , matrix.conjugate().T )


def rayleigh_quotient( a: np.ndarray , v: np.ndarray ) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))


def tests() -> None:
    a = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), F"""{a} is not hermitian."""
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), F"""{a} is not hermitian."""
    assert rayleigh_quotient(a , v ) == float(3 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests() | 67 | 0 |
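A quick numerical cross-check of the property the snippet relies on: for a Hermitian matrix the Rayleigh quotient is real and lies between the extreme eigenvalues.

```python
import numpy as np

a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
v = np.array([[1], [2], [3]], dtype=complex)
r = (v.conj().T @ a @ v) / (v.conj().T @ v)
eigs = np.linalg.eigvalsh(a)  # real eigenvalues of a Hermitian matrix
assert eigs.min() - 1e-9 <= r.real.item() <= eigs.max() + 1e-9
```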
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {'''vocab_file''': '''spiece.model'''}
__UpperCamelCase : int = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
__UpperCamelCase : str = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
__UpperCamelCase : Any = 0
__UpperCamelCase : str = 1
__UpperCamelCase : List[Any] = 2
__UpperCamelCase : Dict = 3
__UpperCamelCase : Dict = 4
class XLNetTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = '''left'''
def __init__( self , _snake_case , _snake_case=False , _snake_case=True , _snake_case=False , _snake_case="<s>" , _snake_case="</s>" , _snake_case="<unk>" , _snake_case="<sep>" , _snake_case="<pad>" , _snake_case="<cls>" , _snake_case="<mask>" , _snake_case=["<eop>", "<eod>"] , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , additional_special_tokens=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
lowerCAmelCase = 3
lowerCAmelCase = do_lower_case
lowerCAmelCase = remove_space
lowerCAmelCase = keep_accents
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.sp_model )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if self.remove_space:
lowerCAmelCase = ' '.join(inputs.strip().split() )
else:
lowerCAmelCase = inputs
lowerCAmelCase = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
lowerCAmelCase = unicodedata.normalize('NFKD' , _snake_case )
lowerCAmelCase = ''.join([c for c in outputs if not unicodedata.combining(_snake_case )] )
if self.do_lower_case:
lowerCAmelCase = outputs.lower()
return outputs
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.preprocess_text(_snake_case )
lowerCAmelCase = self.sp_model.encode(_snake_case , out_type=_snake_case )
lowerCAmelCase = []
for piece in pieces:
if len(_snake_case ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_snake_case , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase = cur_pieces[1:]
else:
lowerCAmelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_snake_case )
else:
new_pieces.append(_snake_case )
return new_pieces
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.sp_model.PieceToId(_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.sp_model.IdToPiece(_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case , _snake_case = False , _snake_case = None , _snake_case = True , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = kwargs.pop('use_source_tokenizer' , _snake_case )
lowerCAmelCase = self.convert_ids_to_tokens(_snake_case , skip_special_tokens=_snake_case )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCAmelCase = []
lowerCAmelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_snake_case ) )
lowerCAmelCase = []
sub_texts.append(_snake_case )
else:
current_sub_text.append(_snake_case )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_snake_case ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCAmelCase = ''.join(_snake_case )
lowerCAmelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCAmelCase = self.clean_up_tokenization(_snake_case )
return clean_text
else:
return text
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case )
if token_ids_a is not None:
return ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1, 1]
return ([0] * len(_snake_case )) + [1, 1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
| 4 |
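Usage sketch for the tokenizer above; the checkpoint id comes from the vocab map in the snippet. Note that XLNet appends its special tokens at the end of the sequence and pads on the left:

```python
from transformers import XLNetTokenizer

tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
enc = tok("Hello world")
print(tok.convert_ids_to_tokens(enc["input_ids"]))
# [..., '<sep>', '<cls>'] — special tokens go at the END of the sequence
```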
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple ,__A : Dict ,__A : List[Any]=7 ,__A : Dict=3 ,__A : Tuple=30 ,__A : Dict=400 ,__A : Any=True ,__A : List[Any]=None ,__A : Any=True ,__A : List[str]=[0.5, 0.5, 0.5] ,__A : Union[str, Any]=[0.5, 0.5, 0.5] ,__A : int=True ,__A : List[str]=1 / 255 ,__A : Union[str, Any]=True ,) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowercase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
_lowercase = parent
_lowercase = batch_size
_lowercase = num_channels
_lowercase = min_resolution
_lowercase = max_resolution
_lowercase = do_resize
_lowercase = size
_lowercase = do_normalize
_lowercase = image_mean
_lowercase = image_std
_lowercase = do_rescale
_lowercase = rescale_factor
_lowercase = do_pad
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCAmelCase ( self : Tuple ,__A : Union[str, Any] ,__A : List[str]=False ) -> Union[str, Any]:
if not batched:
_lowercase = image_inputs[0]
if isinstance(__A ,Image.Image ):
_lowercase , _lowercase = image.size
else:
_lowercase , _lowercase = image.shape[1], image.shape[2]
if w < h:
_lowercase = int(self.size['shortest_edge'] * h / w )
_lowercase = self.size['shortest_edge']
elif w > h:
_lowercase = self.size['shortest_edge']
_lowercase = int(self.size['shortest_edge'] * w / h )
else:
_lowercase = self.size['shortest_edge']
_lowercase = self.size['shortest_edge']
else:
_lowercase = []
for image in image_inputs:
_lowercase , _lowercase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowercase = max(__A ,key=lambda __A : item[0] )[0]
_lowercase = max(__A ,key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = DetaImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
_lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A ,'image_mean' ) )
self.assertTrue(hasattr(__A ,'image_std' ) )
self.assertTrue(hasattr(__A ,'do_normalize' ) )
self.assertTrue(hasattr(__A ,'do_resize' ) )
self.assertTrue(hasattr(__A ,'do_rescale' ) )
self.assertTrue(hasattr(__A ,'do_pad' ) )
self.assertTrue(hasattr(__A ,'size' ) )
def __UpperCAmelCase ( self : str ) -> List[str]:
_lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad ,__A )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
pass
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A ,Image.Image )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,np.ndarray )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,torch.Tensor )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
# prepare image and target
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'image_id': 3_9769, 'annotations': target}
# encode them
_lowercase = DetaImageProcessor()
_lowercase = image_processing(images=__A ,annotations=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations( self : List[Any] ) -> Dict:
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
            target = json.loads(f.read() )
        target = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
        # encode them
        image_processing = DetaImageProcessor(format='coco_panoptic' )
        encoding = image_processing(images=image ,annotations=target ,masks_path=masks_path ,return_tensors='pt' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['pixel_values'].shape ,expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,expected_slice ,atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape ,expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,expected_boxes_slice ,atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,expected_image_id ) )
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,expected_iscrowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,expected_class_labels ) )
        # verify masks
        expected_masks_sum = 82_2873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,expected_size ) ) | 67 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mbart"""] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_mbart"""] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_mbart"""] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory( args :Namespace ) -> Tuple:
    return TrainCommand(args )
class TrainCommand( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
    def register_subcommand( parser : ArgumentParser ) -> List[Any]:
        train_parser = parser.add_parser('train' ,help='CLI tool to train a model on a task.' )
        train_parser.add_argument(
            '--train_data' ,type=str ,required=True ,help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' ,)
        train_parser.add_argument(
            '--column_label' ,type=int ,default=0 ,help='Column of the dataset csv file with example labels.' )
        train_parser.add_argument(
            '--column_text' ,type=int ,default=1 ,help='Column of the dataset csv file with example texts.' )
        train_parser.add_argument(
            '--column_id' ,type=int ,default=2 ,help='Column of the dataset csv file with example ids.' )
        train_parser.add_argument(
            '--skip_first_row' ,action='store_true' ,help='Skip the first row of the csv file (headers).' )
        train_parser.add_argument('--validation_data' ,type=str ,default='' ,help='path to validation dataset.' )
        train_parser.add_argument(
            '--validation_split' ,type=float ,default=0.1 ,help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' ,)
        train_parser.add_argument('--output' ,type=str ,default='./' ,help='path to saved the trained model.' )
        train_parser.add_argument(
            '--task' ,type=str ,default='text_classification' ,help='Task to train the model on.' )
        train_parser.add_argument(
            '--model' ,type=str ,default='bert-base-uncased' ,help='Model\'s name or path to stored model.' )
        train_parser.add_argument('--train_batch_size' ,type=int ,default=32 ,help='Batch size for training.' )
        train_parser.add_argument('--valid_batch_size' ,type=int ,default=64 ,help='Batch size for validation.' )
        train_parser.add_argument('--learning_rate' ,type=float ,default=3e-5 ,help='Learning rate.' )
        train_parser.add_argument('--adam_epsilon' ,type=float ,default=1e-08 ,help='Epsilon for Adam optimizer.' )
        train_parser.set_defaults(func=train_command_factory )
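        # Invocation sketch once the subcommand is registered with the real
        # `transformers-cli` entry point (paths below are illustrative only):
        #   transformers-cli train --train_data ./train.csv --output ./out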
    def __init__( self ,args : Namespace ) -> None:
        self.logger = logging.get_logger('transformers-cli/training' )
        self.framework = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output ,exist_ok=True )
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(F"""Loading dataset from {args.train_data}""" )
        self.train_dataset = Processor.create_from_csv(
            args.train_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run( self : Optional[Any] ) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
    def run_torch( self : Tuple ) -> List[Any]:
        raise NotImplementedError
    def run_tf( self : Optional[int] ) -> Optional[int]:
self.pipeline.fit(
self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,)
# Save trained pipeline
self.pipeline.save_pretrained(self.output ) | 67 | 0 |
def SCREAMING_SNAKE_CASE__ ( mass: float , velocity: float ):
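    """
    Return the kinetic energy 0.5 * m * |v|**2 of a body.

    Doctest examples (illustrative values added so the doctest harness in the
    __main__ block below actually has something to verify):

    >>> SCREAMING_SNAKE_CASE__(10, 10)
    500.0
    >>> SCREAMING_SNAKE_CASE__(0, 10)
    0.0
    """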
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 6 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url :Any ) -> str:
    config = DPTConfig(embedding_type='hybrid' )
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = 'project'
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_( state_dict :str ) -> str:
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( name :List[Any] ) -> Any:
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model' , 'dpt.encoder' )
    if "pretrained.model" in name:
        name = name.replace('pretrained.model' , 'dpt.embeddings' )
    if "patch_embed" in name:
        name = name.replace('patch_embed' , '' )
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'position_embeddings' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "proj" in name and "project" not in name:
        name = name.replace('proj' , 'projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layer' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "norm1" in name and "backbone" not in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name and "backbone" not in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv' , 'head' )
    if "scratch" in name:
        name = name.replace('scratch' , 'neck' )
    if "layer1_rn" in name:
        name = name.replace('layer1_rn' , 'convs.0' )
    if "layer2_rn" in name:
        name = name.replace('layer2_rn' , 'convs.1' )
    if "layer3_rn" in name:
        name = name.replace('layer3_rn' , 'convs.2' )
    if "layer4_rn" in name:
        name = name.replace('layer4_rn' , 'convs.3' )
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
_lowercase = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_lowercase = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_lowercase = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_lowercase = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_lowercase = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowercase = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_lowercase = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_lowercase = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_lowercase = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_lowercase = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_lowercase = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_lowercase = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_lowercase = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_lowercase = name.replace('bn' , 'batch_norm' )
if "head" in name:
_lowercase = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_lowercase = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_lowercase = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
_lowercase = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
_lowercase = name.replace('..' , '.' )
if "stem.conv" in name:
_lowercase = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
_lowercase = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
_lowercase = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
_lowercase = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
_lowercase = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
_lowercase = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def read_in_q_k_v( state_dict :List[str] , config :int ) -> Dict:
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
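# Layout note: the fused timm projection stacks the three matrices row-wise,
# so for hidden size h the slices above are rows [0, h) -> query,
# [h, 2h) -> key and [2h, 3h) -> value, matching the "in that order" comment.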
def prepare_img() -> Tuple:
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url :Optional[int] , pytorch_dump_folder_path :List[Any] , push_to_hub :str , model_name :Any , show_prediction :List[str] ) -> str:
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location='cpu' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if 'ade' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors='pt' )
    # forward pass
    outputs = model(**encoding ).logits if 'ade' in checkpoint_url else model(**encoding ).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"""Saving model to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub('ybelkada/dpt-hybrid-midas' )
        image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
) | 67 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
def create_rename_keys( config : List[Any] , vqa_model : Dict=False , nlvr_model : Optional[int]=False , irtr_model : str=False ) -> Optional[Any]:
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def read_in_q_k_v( state_dict : int , config : int ) -> Any:
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        prefix = 'vilt.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + F'''encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + F'''encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + F'''encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + F'''encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + F'''encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + F'''encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict : Tuple ) -> str:
    '''simple docstring'''
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct : Dict , old : str , new : Tuple ) -> str:
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint( checkpoint_url : List[str] , pytorch_dump_folder_path : Dict ) -> List[str]:
    '''simple docstring'''
    config = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=False )
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 31_29
        repo_id = 'huggingface/label-files'
        filename = 'vqa2-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config )
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: 'False', 1: 'True'}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config )
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config )
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config )
    else:
        raise ValueError('Unknown model type' )
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )['state_dict']
    rename_keys = create_rename_keys(config , vqa_model , nlvr_model , irtr_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config )
    if mlm_model or irtr_model:
        ignore_keys = ['itm_score.fc.weight', 'itm_score.fc.bias']
        for k in ignore_keys:
            state_dict.pop(k , None )
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict )
    # Define processor
    image_processor = ViltImageProcessor(size=3_84 )
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )
    processor = ViltProcessor(image_processor , tokenizer )
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image_1 = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=True ).raw )
        image_2 = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=True ).raw )
        text = (
            'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
            ' standing.'
        )
        encoding_1 = processor(image_1 , text , return_tensors='pt' )
        encoding_2 = processor(image_2 , text , return_tensors='pt' )
        outputs = model(
            input_ids=encoding_1.input_ids , pixel_values=encoding_1.pixel_values , pixel_values_2=encoding_2.pixel_values , )
    else:
        image = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=True ).raw )
        if mlm_model:
            text = 'a bunch of [MASK] laying on a [MASK].'
        else:
            text = 'How many cats are there?'
        encoding = processor(image , text , return_tensors='pt' )
        outputs = model(**encoding )
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 3_05_22] )
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , expected_slice , atol=1E-4 )
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 31_29] )
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
        assert outputs.logits.shape == expected_shape
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1 ).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2] )
        expected_slice = torch.tensor([-2.8721, 2.1291] )
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
        assert outputs.logits.shape == expected_shape
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model and processor to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
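# Invocation sketch (the script file name below is assumed, not given here):
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm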
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 7 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit_mae"""] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_vit_mae"""] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 67 | 0 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def load_vocab_and_emoji( vocab_file: Dict , emoji_file: Dict ) -> Any:
    with open(emoji_file , 'r' , encoding='utf-8' ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as f:
        token = f.readlines()
    token = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
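# Format note: each vocab.txt line is a comma-separated group of surface forms
# sharing one token id, and emoji.json carries the "emoji"/"emoji_inv" lookup
# tables used by the tokenizer below. A minimal usage sketch, assuming files
# that follow those formats:
#   vocab, raw_vocab, ids_to_tokens, emoji = load_vocab_and_emoji('vocab.txt', 'emoji.json')
#   swt = SubWordJapaneseTokenizer(vocab=vocab, ids_to_tokens=ids_to_tokens, emoji=emoji)
#   tokens = swt.tokenize('こんにちは 世界', clean=True)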
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs ):
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`')
        if not os.path.isfile(emoji_file):
            raise ValueError(
                F'Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`')
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
    @property
    def vocab_size( self):
        '''simple docstring'''
        return len(self.raw_vocab)
    def get_vocab( self):
        '''simple docstring'''
        return dict(self.raw_vocab , **self.added_tokens_encoder)
    def _tokenize( self , text):
        '''simple docstring'''
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text)
    def _convert_token_to_id( self , token):
        '''simple docstring'''
        return self.vocab.get(token , self.vocab.get(self.unk_token))
    def _convert_id_to_token( self , index):
        '''simple docstring'''
        return self.subword_tokenizer.convert_id_to_token(index)
    def convert_tokens_to_string( self , tokens):
        '''simple docstring'''
        out_string = ''.join(tokens).strip()
        return out_string
    def _build_conversation_input_ids( self , conversation: "Conversation"):
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self , save_directory , filename_prefix = None):
        '''simple docstring'''
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
            emoji_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'])
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file , 'w' , encoding='utf-8') as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write(','.join(token) + '\n')
                index += 1
        with open(emoji_file , 'w' , encoding='utf-8') as writer:
            json.dump(self.emoji , writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object ):
    def __init__( self , vocab , ids_to_tokens , emoji):
        '''simple docstring'''
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)')
        self.content_repatter2 = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*')
        self.content_repatter3 = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}')
        self.content_repatter4 = re.compile(
            R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
        self.content_repatter5 = re.compile(
            R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
        self.content_repatter6 = re.compile(
            R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*')
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks})
def __len__( self):
'''simple docstring'''
return len(self.ids_to_tokens)
    def clean_text( self , content):
        '''simple docstring'''
        content = self.content_repatter1.sub('<URL>' , content)
        content = self.content_repatter2.sub('<EMAIL>' , content)
        content = self.content_repatter3.sub('<TEL>' , content)
        content = self.content_repatter4.sub('<DATE>' , content)
        content = self.content_repatter5.sub('<DATE>' , content)
        content = self.content_repatter6.sub('<PRICE>' , content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>' , '<BLOCK>')
        return content
    def tokenize( self , text , clean=False):
        '''simple docstring'''
        text = text.replace(' ' , '<SP>')
        text = text.replace('\u3000' , '<SP>')
        text = text.replace('\r\n' , '<BR>')
        text = text.replace('\n' , '<BR>')
        text = text.replace('\r' , '<BR>')
        text = text.replace('\t' , '<TAB>')
        text = text.replace('—' , 'ー')
        text = text.replace('−' , 'ー')
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v)
        if clean:
            text = self.clean_text(text)
        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0Xc_2_a_1 and c <= 0Xc_2_b_f)
                    or (c >= 0Xc_7_8_0 and c <= 0Xc_7_8_3)
                    or (c >= 0Xc_a_b_9 and c <= 0Xc_b_b_f)
                    or (c >= 0Xc_c_8_0 and c <= 0Xc_d_a_2)
                ):
                    return True
            return False
        def checkuae(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0Xe_2_8_0_8_0 and c <= 0Xe_2_b_0_7_f:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text) , pos + self.maxlen + 1) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _ , wd , e = sorted(candidates , key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append('<KIGOU>')
                elif checkuae(wd):
                    result.append('<U2000U2BFF>')
                else:
                    for i in wd.encode('utf-8'):
                        result.append('<|byte%d|>' % i)
                pos = end
        return result
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase="\n"):
'''simple docstring'''
__A : Optional[int] = []
__A : Tuple = []
__A : Any = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(_UpperCAmelCase) > 0:
words.append(bytearray(_UpperCAmelCase).decode('utf-8' , errors='replace'))
__A : Tuple = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word])
elif word == "<SP>":
words.append(' ')
elif word == "<BR>":
words.append(_UpperCAmelCase)
elif word == "<TAB>":
words.append('\t')
elif word == "<BLOCK>":
words.append('▀')
elif word == "<KIGOU>":
words.append('ǀ')
elif word == "<U2000U2BFF>":
words.append('‖')
else:
words.append(_UpperCAmelCase)
if len(_UpperCAmelCase) > 0:
words.append(bytearray(_UpperCAmelCase).decode('utf-8' , errors='replace'))
__A : Optional[int] = ''.join(_UpperCAmelCase)
return text | 8 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline | 67 | 0 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet( hor ) -> Union[str, Any]:
    if hor == 128:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
    state_dict = model.state_dict()
    config = {
        'down_block_types': down_block_types,
        'block_out_channels': block_out_channels,
        'up_block_types': up_block_types,
        'layers_per_block': 1,
        'use_timestep_embedding': True,
        'out_block_type': 'OutConv1DBlock',
        'norm_num_groups': 8,
        'downsample_each_block': False,
        'in_channels': 14,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'sample_size': 65_536,
        'mid_block_type': 'MidResTemporalBlock1D',
        'act_fn': 'mish',
    }
    hf_value_function = UNet1DModel(**config )
    print(f'''length of state dict: {len(state_dict.keys() )}''' )
    print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
    with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , 'w' ) as f:
        json.dump(config , f )
def value_function() -> List[str]:
    config = {
        'in_channels': 14,
        'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
        'up_block_types': (),
        'out_block_type': 'ValueFunction',
        'mid_block_type': 'ValueFunctionMidBlock1D',
        'block_out_channels': (32, 64, 128, 256),
        'layers_per_block': 1,
        'downsample_each_block': True,
        'sample_size': 65_536,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'use_timestep_embedding': True,
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'norm_num_groups': 8,
        'act_fn': 'mish',
    }
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
    state_dict = model
    hf_value_function = UNet1DModel(**config )
    print(f'''length of state dict: {len(state_dict.keys() )}''' )
    print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
    with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
        json.dump(config , f )
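# Note: the zip-based key mapping in both functions above assumes the original
# and HF state dicts enumerate parameters in the same order; a hypothetical
# sanity check before remapping:
#   assert len(state_dict) == len(hf_value_function.state_dict())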
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
| 9 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
snake_case = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A_ ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''transfo-xl'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''',
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self ,vocab_size=26_7735 ,cutoffs=[2_0000, 4_0000, 20_0000] ,d_model=1024 ,d_embed=1024 ,n_head=16 ,d_head=64 ,d_inner=4096 ,div_val=4 ,pre_lnorm=False ,n_layer=18 ,mem_len=1600 ,clamp_len=1000 ,same_length=True ,proj_share_all_but_first=True ,attn_type=0 ,sample_softmax=-1 ,adaptive=True ,dropout=0.1 ,dropatt=0.0 ,untie_r=True ,init="normal" ,init_range=0.01 ,proj_init_std=0.01 ,init_std=0.02 ,layer_norm_epsilon=1e-5 ,eos_token_id=0 ,**kwargs ) -> List[Any]:
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id ,**kwargs )
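    # Semantics sketch of the defaults above (no extra behavior): the adaptive
    # softmax cutoffs [20000, 40000, 200000] split the 267,735-token vocabulary
    # into frequency bands [0, 20000), [20000, 40000), [40000, 200000) and
    # [200000, 267735), and div_val=4 shrinks the embedding width by 4x per band.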
    @property
    def max_position_embeddings( self : str ) -> Optional[int]:
        # Message copied from Transformer-XL documentation
        logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self : Any ,value : Dict ) -> Optional[Any]:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) | 67 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class lowerCAmelCase_ ( PretrainedConfig ):
    model_type = "upernet"
    def __init__( self , backbone_config=None , hidden_size=512 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=384 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , **kwargs ):
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
            backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
        elif isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.get('''model_type''' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict( self : str ):
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
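# Minimal usage sketch, relying on the default ResNet path above:
#   config = lowerCAmelCase_()
#   assert config.backbone_config.model_type == "resnet"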
| 10 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class A_ ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''dpr'''
    def __init__( self ,vocab_size=3_0522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,pad_token_id=0 ,position_embedding_type="absolute" ,projection_dim: int = 0 ,**kwargs ) -> Tuple:
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type | 67 | 0 |
'''simple docstring'''
import math
def check_partition_perfect(positive_integer):
    """simple docstring"""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def solution(max_proportion = 1 / 12_345):
    """simple docstring"""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(integer)
        integer += 1
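# Mechanics note: (integer**2 - 1) / 4 is a whole number exactly when `integer`
# is odd (e.g. 3 -> 2.0, 5 -> 6.0, but 4 -> 3.75), so only odd integers add
# partitions to the ratio tracked above.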
if __name__ == "__main__":
print(F"""{solution() = }""")
| 11 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ) -> Optional[Any]:
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
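# Round sketch on a hypothetical 4-element list [4, 3, 2, 1]:
#   even round: pairs (0,1) and (2,3) compare/swap -> [3, 4, 1, 2]
#   odd round:  pair (1,2) compares/swaps          -> [3, 1, 4, 2]
# alternating such rounds n times guarantees a sorted list.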
def odd_even_transposition( arr :str ) -> Dict:
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(arr ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main() -> List[Any]:
    arr = list(range(10 , 0 , -1 ) )
    print('Initial List' )
    print(*arr )
    arr = odd_even_transposition(arr )
    print('Sorted List\n' )
    print(*arr )
if __name__ == "__main__":
main() | 67 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : Dict = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ) -> int:
    '''simple docstring'''
    config = DPTConfig(embedding_type="""hybrid""" )
    if "large" in checkpoint_url:
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [2_56, 5_12, 10_24, 10_24]
        expected_shape = (1, 3_84, 3_84)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 7_68
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [2_56, 5_12, 7_68, 7_68]
        config.num_labels = 1_50
        config.patch_size = 16
        expected_shape = (1, 3_84, 3_84)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = """project"""
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 7_68
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 1_50
        config.patch_size = 16
        repo_id = """huggingface/label-files"""
        filename = """ade20k-id2label.json"""
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="""dataset""" ) ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 1_50, 4_80, 4_80]
    return config, expected_shape
def UpperCamelCase ( lowercase_ ) -> Any:
'''simple docstring'''
lowercase__ : Dict = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase_ , lowercase_ )
def UpperCamelCase ( lowercase_ ) -> Any:
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase__ : List[str] = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
lowercase__ : Tuple = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
lowercase__ : Any = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
lowercase__ : Union[str, Any] = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
lowercase__ : Optional[int] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
lowercase__ : Any = name.replace("""proj""" , """projection""" )
if "blocks" in name:
lowercase__ : Any = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
lowercase__ : Optional[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase__ : Any = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
lowercase__ : Dict = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
lowercase__ : Tuple = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
lowercase__ : Any = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
lowercase__ : int = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
lowercase__ : Tuple = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
lowercase__ : List[str] = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
lowercase__ : Dict = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
lowercase__ : List[str] = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
lowercase__ : int = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase__ : List[str] = name.replace(F'refinenet{layer_idx}' , F'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
lowercase__ : List[Any] = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
lowercase__ : Union[str, Any] = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
lowercase__ : Optional[Any] = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
lowercase__ : Optional[Any] = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
lowercase__ : Optional[Any] = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase__ : Union[str, Any] = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase__ : Optional[int] = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase__ : Any = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase__ : int = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase__ : str = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
lowercase__ : Tuple = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
lowercase__ : Union[str, Any] = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
lowercase__ : Tuple = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
lowercase__ : Union[str, Any] = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
lowercase__ : Dict = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
lowercase__ : int = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
lowercase__ : str = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
lowercase__ : Any = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
lowercase__ : Tuple = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
lowercase__ : List[Any] = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
lowercase__ : List[Any] = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
lowercase__ : List[Any] = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
lowercase__ : Tuple = name.replace("""..""" , """.""" )
if "stem.conv" in name:
lowercase__ : List[Any] = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowercase__ : Any = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
lowercase__ : Dict = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
lowercase__ : Dict = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
lowercase__ : Optional[int] = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
lowercase__ : Union[str, Any] = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
lowercase__ : str = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
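# Quick illustrative check (an addition, not part of the conversion) of the
# refinenet index trick used above: abs(layer_idx - 4) reverses the 1..4
# numbering, so refinenet4 maps to fusion_stage.layers.0 and refinenet1 to
# fusion_stage.layers.3.
def _demo_refinenet_mapping() -> dict:
    return {f"refinenet{i}": f"fusion_stage.layers.{abs(i - 4)}" for i in range(1, 5)}
assert _demo_refinenet_mapping()["refinenet4"] == "fusion_stage.layers.0"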
def UpperCamelCase ( lowercase_ , lowercase_ ) -> List[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : str = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.weight' )
lowercase__ : Tuple = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ : Tuple = in_proj_weight[: config.hidden_size, :]
lowercase__ : Union[str, Any] = in_proj_bias[: config.hidden_size]
lowercase__ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : Any = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : Optional[Any] = in_proj_bias[-config.hidden_size :]
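# Shape intuition for the slicing above (a minimal sketch with illustrative
# names, not part of the conversion): timm fuses query/key/value into a single
# projection of shape (3 * hidden, hidden); the three equal row slices are q, k, v.
def _demo_qkv_split(hidden_size: int = 4) -> None:
    fused = torch.randn(3 * hidden_size, hidden_size)
    q = fused[:hidden_size, :]
    k = fused[hidden_size : hidden_size * 2, :]
    v = fused[-hidden_size:, :]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)
# _demo_qkv_split()  # uncomment to sanity-check the slice shapes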
def UpperCamelCase ( ) -> Tuple:
'''simple docstring'''
lowercase__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase__ : Union[str, Any] = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
lowercase__ , lowercase__ : str = get_dpt_config(lowercase_ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowercase__ : Any = torch.load(lowercase_ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(lowercase_ )
# rename keys
for key in state_dict.copy().keys():
lowercase__ : str = state_dict.pop(lowercase_ )
lowercase__ : int = val
# read in qkv matrices
read_in_q_k_v(lowercase_ , lowercase_ )
# load HuggingFace model
lowercase__ : Any = DPTForSemanticSegmentation(lowercase_ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
# Check outputs on an image
lowercase__ : List[Any] = 4_80 if """ade""" in checkpoint_url else 3_84
lowercase__ : Dict = DPTImageProcessor(size=lowercase_ )
lowercase__ : Optional[int] = prepare_img()
lowercase__ : Dict = image_processor(lowercase_ , return_tensors="""pt""" )
# forward pass
lowercase__ : Any = model(**lowercase_ ).logits if """ade""" in checkpoint_url else model(**lowercase_ ).predicted_depth
if show_prediction:
lowercase__ : Union[str, Any] = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=lowercase_ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 2_55 ).show()
if pytorch_dump_folder_path is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase_ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
lowerCamelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
lowerCamelCase__ : Any = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 12 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''big_bird'''
def __init__( self : str ,__A : Union[str, Any]=5_0358 ,__A : Any=768 ,__A : List[str]=12 ,__A : Union[str, Any]=12 ,__A : int=3072 ,__A : Tuple="gelu_new" ,__A : Any=0.1 ,__A : Optional[Any]=0.1 ,__A : Tuple=4096 ,__A : int=2 ,__A : Union[str, Any]=0.02 ,__A : Optional[int]=1e-12 ,__A : List[str]=True ,__A : List[Any]=0 ,__A : Optional[Any]=1 ,__A : Optional[int]=2 ,__A : Optional[int]=66 ,__A : Tuple="block_sparse" ,__A : Optional[int]=True ,__A : Optional[int]=False ,__A : Tuple=64 ,__A : str=3 ,__A : Optional[int]=None ,**__A : Dict ,) -> Union[str, Any]:
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,sep_token_id=__A ,**__A ,)
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = initializer_range
_lowercase = type_vocab_size
_lowercase = layer_norm_eps
_lowercase = use_cache
_lowercase = rescale_embeddings
_lowercase = attention_type
_lowercase = use_bias
_lowercase = block_size
_lowercase = num_random_blocks
_lowercase = classifier_dropout
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowercase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 67 | 0 |
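# A small hedged illustration of the dynamic-axes mapping the OnnxConfig above
# returns for each task (axis names mirror the property; the helper itself is
# an illustrative addition):
from collections import OrderedDict
def _demo_onnx_inputs(task: str) -> OrderedDict:
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
assert list(_demo_onnx_inputs("default")) == ["input_ids", "attention_mask"]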
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int ) -> Optional[int]:
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str ) -> Union[str, Any]:
__lowerCamelCase : List[Any] = tmp_path / 'cache'
__lowerCamelCase : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCamelCase : str = SqlDatasetReader(
'dataset' , 'sqlite:///' + sqlite_path , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ ).read()
_check_sql_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
@require_sqlalchemy
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def UpperCAmelCase__ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] ) -> Optional[Any]:
__lowerCamelCase : Tuple = tmp_path / 'cache'
__lowerCamelCase : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowerCamelCase : str = features.copy() if features else default_expected_features
__lowerCamelCase : List[Any] = (
Features({feature: Value(UpperCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCamelCase : Optional[Any] = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_sql_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCAmelCase__ ( UpperCAmelCase_ : Any ) -> Any:
    with contextlib.closing(sqlite3.connect(UpperCAmelCase_ ) ) as con:
__lowerCamelCase : List[str] = con.cursor()
cur.execute('SELECT * FROM dataset' )
for row in cur:
yield row
@require_sqlalchemy
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] ) -> Union[str, Any]:
__lowerCamelCase : Optional[int] = tmp_path / 'cache'
__lowerCamelCase : Any = os.path.join(UpperCAmelCase_ , 'tmp.sql' )
__lowerCamelCase : List[str] = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=UpperCAmelCase_ ).read()
SqlDatasetWriter(UpperCAmelCase_ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=1 ).write()
__lowerCamelCase : Optional[Any] = iter_sql_file(UpperCAmelCase_ )
__lowerCamelCase : int = iter_sql_file(UpperCAmelCase_ )
for rowa, rowa in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
assert rowa == rowa
@require_sqlalchemy
def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict ) -> List[Any]:
__lowerCamelCase : List[str] = tmp_path / 'cache'
__lowerCamelCase : Dict = os.path.join(UpperCAmelCase_ , 'tmp.sql' )
__lowerCamelCase : Union[str, Any] = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=UpperCAmelCase_ ).read()
SqlDatasetWriter(UpperCAmelCase_ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=2 ).write()
__lowerCamelCase : str = iter_sql_file(UpperCAmelCase_ )
__lowerCamelCase : List[Any] = iter_sql_file(UpperCAmelCase_ )
for rowa, rowa in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
assert rowa == rowa
@require_sqlalchemy
def UpperCAmelCase__ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] ) -> int:
__lowerCamelCase : str = tmp_path / 'cache'
__lowerCamelCase : Dict = os.path.join(UpperCAmelCase_ , 'tmp.sql' )
__lowerCamelCase : int = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=UpperCAmelCase_ ).read()
with pytest.raises(UpperCAmelCase_ ):
SqlDatasetWriter(UpperCAmelCase_ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=0 ).write()
| 13 |
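# A stdlib-only sketch of the round trip the tests above exercise: write rows
# into a SQLite table and read them back. The table and column names are
# illustrative assumptions, not taken from the test fixtures.
import contextlib
import sqlite3
def _demo_sqlite_roundtrip() -> list:
    with contextlib.closing(sqlite3.connect(":memory:")) as con:
        con.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
        con.executemany("INSERT INTO dataset VALUES (?, ?, ?)", [("a", 1, 1.0), ("b", 2, 2.0)])
        return list(con.execute("SELECT * FROM dataset"))
assert _demo_sqlite_roundtrip() == [("a", 1, 1.0), ("b", 2, 2.0)]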
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> list:
_lowercase = [0] * len(snake_case__ )
for i in range(1 , len(snake_case__ ) ):
# use last results for better performance - dynamic programming
_lowercase = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_lowercase = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_lowercase = j
return prefix_result
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> int:
return max(prefix_function(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 67 | 0 |
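# A readable restatement of the prefix function above (a sketch; the original
# assigns through obfuscated names): pi[i] is the length of the longest proper
# prefix of s[:i + 1] that is also its suffix, i.e. the KMP failure function.
def _demo_prefix_function(s: str) -> list:
    pi = [0] * len(s)
    for i in range(1, len(s)):
        j = pi[i - 1]
        while j > 0 and s[i] != s[j]:
            j = pi[j - 1]
        if s[i] == s[j]:
            j += 1
        pi[i] = j
    return pi
# e.g. for "aabaaab": "aab" is both a prefix and a suffix, so the last entry is 3,
# and max(pi) is exactly what the second helper above computes.
assert _demo_prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
assert max(_demo_prefix_function("aabaaab")) == 3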
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
a__ = '''__DUMMY_TRANSFORMERS_USER__'''
a__ = '''Dummy User'''
a__ = '''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'''
a__ = '''https://hub-ci.huggingface.co'''
a__ = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
a__ = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
a__ = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def __UpperCAmelCase ( __a : Tuple ) -> Dict:
"""simple docstring"""
monkeypatch.setattr(
'''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''' ,__a )
@pytest.fixture
def __UpperCAmelCase ( __a : Dict ) -> List[str]:
"""simple docstring"""
monkeypatch.setattr('''datasets.config.HF_ENDPOINT''' ,__a )
monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''' ,__a )
@pytest.fixture
def __UpperCAmelCase ( __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''' ,__a )
@pytest.fixture
def __UpperCAmelCase ( __a : Optional[int] ,__a : Dict ) -> Union[str, Any]:
"""simple docstring"""
HfFolder.save_token(__a )
yield
HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
return HfApi(endpoint=__a )
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __a : HfApi ) -> List[Any]:
"""simple docstring"""
_a : Optional[int] = HfFolder.get_token()
HfFolder.save_token(__a )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(__a )
@pytest.fixture
def __UpperCAmelCase ( __a : Optional[int] ) -> Dict:
"""simple docstring"""
def _cleanup_repo(__a : int ):
hf_api.delete_repo(__a ,token=__a ,repo_type='''dataset''' )
return _cleanup_repo
@pytest.fixture
def __UpperCAmelCase ( __a : List[str] ) -> int:
"""simple docstring"""
@contextmanager
def _temporary_repo(__a : Optional[Any] ):
try:
yield repo_id
finally:
cleanup_repo(__a )
return _temporary_repo
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __a : HfApi ,__a : str ,__a : int ) -> Dict:
"""simple docstring"""
_a : List[str] = F"""repo_txt_data-{int(time.time() * 10E3 )}"""
_a : List[Any] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__a ,token=__a ,repo_type='''dataset''' ,private=__a )
hf_api.upload_file(
token=__a ,path_or_fileobj=str(__a ) ,path_in_repo='''data/text_data.txt''' ,repo_id=__a ,repo_type='''dataset''' ,)
yield repo_id
try:
hf_api.delete_repo(__a ,token=__a ,repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __UpperCAmelCase ( __a : int ,__a : List[Any] ,__a : Dict ) -> List[Any]:
"""simple docstring"""
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __a : HfApi ,__a : str ,__a : Tuple ) -> Dict:
"""simple docstring"""
_a : List[str] = F"""repo_zipped_txt_data-{int(time.time() * 10E3 )}"""
_a : Optional[int] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__a ,token=__a ,repo_type='''dataset''' ,private=__a )
hf_api.upload_file(
token=__a ,path_or_fileobj=str(__a ) ,path_in_repo='''data.zip''' ,repo_id=__a ,repo_type='''dataset''' ,)
yield repo_id
try:
hf_api.delete_repo(__a ,token=__a ,repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __UpperCAmelCase ( __a : Optional[Any] ,__a : Tuple ,__a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __a : HfApi ,__a : Any ,__a : Dict ) -> str:
"""simple docstring"""
_a : Dict = F"""repo_zipped_img_data-{int(time.time() * 10E3 )}"""
_a : str = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__a ,token=__a ,repo_type='''dataset''' ,private=__a )
hf_api.upload_file(
token=__a ,path_or_fileobj=str(__a ) ,path_in_repo='''data.zip''' ,repo_id=__a ,repo_type='''dataset''' ,)
yield repo_id
try:
hf_api.delete_repo(__a ,token=__a ,repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[Any] ,__a : List[str] ) -> Optional[int]:
"""simple docstring"""
return hf_private_dataset_repo_zipped_img_data_
| 14 |
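# The create/yield/delete shape of the repo fixtures above generalises to any
# external resource; a minimal pytest sketch with illustrative names (the dict
# stands in for the hf_api.create_repo / delete_repo calls):
import contextlib
import pytest
@pytest.fixture
def temporary_resource():
    @contextlib.contextmanager
    def _make(name: str):
        resource = {"name": name}  # acquire (stand-in for create_repo)
        try:
            yield resource
        finally:
            resource.clear()  # release (stand-in for delete_repo)
    return _make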
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list ) -> int:
_lowercase = len(snake_case__ )
_lowercase = sum(snake_case__ )
_lowercase = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_lowercase = True
for i in range(1 , s + 1 ):
_lowercase = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
_lowercase = dp[i][j - 1]
if arr[i - 1] <= j:
_lowercase = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
_lowercase = s - 2 * j
break
return diff | 67 | 0 |
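# A hedged restatement of the DP above using a set of reachable subset sums:
# the smallest split difference is s - 2 * j for the largest reachable j <= s // 2.
def _demo_min_partition_diff(arr: list) -> int:
    s = sum(arr)
    reachable = {0}
    for value in arr:
        reachable |= {r + value for r in reachable}
    return s - 2 * max(r for r in reachable if r <= s // 2)
assert _demo_min_partition_diff([1, 6, 11, 5]) == 1  # split {1, 5, 6} vs {11}: |12 - 11| = 1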
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__(self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any=13 , _UpperCAmelCase : str=7 , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Any=False , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Optional[int]=99 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : List[str]=512 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Dict="last" , _UpperCAmelCase : Any=None , _UpperCAmelCase : int=None , ) -> int:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_lengths
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = gelu_activation
lowercase__ = sinusoidal_embeddings
lowercase__ = causal
lowercase__ = asm
lowercase__ = n_langs
lowercase__ = vocab_size
lowercase__ = n_special
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = summary_type
lowercase__ = use_proj
lowercase__ = scope
def lowerCamelCase__ (self : List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_input_lengths:
lowercase__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ = ids_tensor([self.batch_size] , 2 ).float()
lowercase__ = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCamelCase__ (self : int ) -> Dict:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = FlaubertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , lengths=_UpperCAmelCase , langs=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , langs=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = FlaubertWithLMHeadModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = FlaubertForQuestionAnsweringSimple(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ (self : str , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , ) -> List[Any]:
"""simple docstring"""
lowercase__ = FlaubertForQuestionAnswering(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , p_mask=_UpperCAmelCase , )
lowercase__ = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , )
((lowercase__) , ) = result_with_labels.to_tuple()
lowercase__ = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
((lowercase__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , ) -> List[str]:
"""simple docstring"""
lowercase__ = FlaubertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ (self : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , ) -> str:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = FlaubertForTokenClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , ) -> List[str]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = FlaubertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ (self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) = config_and_inputs
lowercase__ = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
A__ = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int ) -> str:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCamelCase__ (self : Any , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
lowercase__ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
lowercase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def lowerCamelCase__ (self : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = FlaubertModelTester(self )
lowercase__ = ConfigTester(self , config_class=_UpperCAmelCase , emb_dim=37 )
def lowerCamelCase__ (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase__ (self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_UpperCAmelCase )
def lowerCamelCase__ (self : List[Any] ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_UpperCAmelCase )
def lowerCamelCase__ (self : str ) -> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_UpperCAmelCase )
def lowerCamelCase__ (self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_UpperCAmelCase )
def lowerCamelCase__ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_UpperCAmelCase )
@slow
def lowerCamelCase__ (self : int ) -> List[Any]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = FlaubertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@slow
@require_torch_gpu
def lowerCamelCase__ (self : Dict ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ = True
lowercase__ = model_class(config=_UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
lowercase__ = torch.jit.trace(
_UpperCAmelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , """traced_model.pt""" ) )
lowercase__ = torch.jit.load(os.path.join(_UpperCAmelCase , """traced_model.pt""" ) , map_location=_UpperCAmelCase )
loaded(inputs_dict["""input_ids"""].to(_UpperCAmelCase ) , inputs_dict["""attention_mask"""].to(_UpperCAmelCase ) )
@require_torch
class A ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCamelCase__ (self : Tuple ) -> Dict:
"""simple docstring"""
lowercase__ = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
lowercase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowercase__ = model(_UpperCAmelCase )[0]
lowercase__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _UpperCAmelCase )
lowercase__ = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
| 15 |
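# The multiple-choice test above uses a common expansion trick: insert a choice
# axis and repeat each sequence once per candidate answer. A minimal hedged
# sketch (function name and sizes are illustrative):
import torch
def _demo_expand_for_choices(input_ids: torch.Tensor, num_choices: int) -> torch.Tensor:
    # (batch, seq_len) -> (batch, num_choices, seq_len), made contiguous for the model
    return input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
assert _demo_expand_for_choices(torch.zeros(2, 7, dtype=torch.long), 4).shape == (2, 4, 7)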
from manim import *
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
_lowercase = Rectangle(height=0.5 ,width=0.5 )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
_lowercase = Rectangle(height=0.25 ,width=0.25 )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('CPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(4 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('GPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
gpu.move_to([-1, -1, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('Model' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
model.move_to([3, -1.0, 0] )
self.add(__A )
_lowercase = []
_lowercase = []
for i, rect in enumerate(__A ):
_lowercase = fill.copy().set_fill(__A ,opacity=0.8 )
target.move_to(__A )
model_arr.append(__A )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__A ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__A )
self.add(*__A ,*__A )
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('Disk' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
disk.move_to([-4, -1.25, 0] )
self.add(__A ,__A )
_lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowercase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__A ,__A )
_lowercase = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(__A ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__A )
_lowercase = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ) )
_lowercase = Square(0.3 )
input.set_fill(__A ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__A ,buff=0.5 )
self.play(Write(__A ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__A ,buff=0.02 )
self.play(MoveToTarget(__A ) )
self.play(FadeOut(__A ) )
_lowercase = Arrow(start=__A ,end=__A ,color=__A ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__A ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_lowercase = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) )
_lowercase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__A ) ,Circumscribe(model_arr[0] ,color=__A ,**__A ) ,Circumscribe(model_cpu_arr[0] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
_lowercase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__A ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_lowercase = AnimationGroup(
FadeOut(__A ,run_time=0.5 ) ,MoveToTarget(__A ,run_time=0.5 ) ,FadeIn(__A ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__A )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_lowercase = 0.7
self.play(
Circumscribe(model_arr[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,Circumscribe(model_arr[i + 1] ,color=__A ,**__A ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__A ,**__A ) ,Circumscribe(cpu_left_col_base[-1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
_lowercase = a_c
_lowercase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__A ) ,FadeOut(__A ,run_time=0.5 ) ,)
_lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) ,MoveToTarget(__A ) )
self.wait() | 67 | 0 |
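# The scene above is built entirely from Rectangle/VGroup primitives plus
# animations; a minimal runnable sketch of that pattern (hedged, assuming the
# Manim Community API that `from manim import *` provides):
from manim import BLUE, RIGHT, Create, Rectangle, Scene, VGroup
class MinimalMemoryRow(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        row = VGroup(*[mem.copy() for _ in range(6)]).arrange(RIGHT, buff=0)
        row.set_fill(BLUE, opacity=0.8)
        self.play(Create(row))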
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int=13 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : str=True , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=224 , __lowerCamelCase : Any=1000 , __lowerCamelCase : Optional[Any]=[3, 3, 6, 4] , __lowerCamelCase : List[Any]=[48, 56, 112, 220] , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = layer_depths
SCREAMING_SNAKE_CASE = embed_dims
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : Dict ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__lowerCamelCase , layer_scale_init_value=1e-5 , )
def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : int ):
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(
self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _snake_case ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def _snake_case ( self : Optional[int] ):
pass
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _snake_case ( self : Tuple ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def _snake_case ( self : Union[str, Any] ):
pass
def _snake_case ( self : Optional[Any] ):
def check_hidden_states_output(__lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = 8
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__lowerCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : List[Any] ):
def _config_zero_init(__lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__lowerCamelCase , __lowerCamelCase , 1e-10 )
if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return configs_no_init
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : str ):
pass
def __a ( ):
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : List[str] ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) | 16 |
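# The hidden-state assertion above encodes SwiftFormer's downsampling schedule:
# the patch embedding divides the input resolution by 4, then every second
# stage halves it again. A small arithmetic sketch (illustrative names):
def _demo_feature_map_sizes(image_size: int = 224, num_states: int = 8) -> list:
    return [(image_size // 4) // 2 ** (i // 2) for i in range(num_states)]
assert _demo_feature_map_sizes() == [56, 56, 28, 28, 14, 14, 7, 7]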
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A_ :
"""simple docstring"""
def __init__( self : Dict ,__A : Any ,__A : Tuple=None ,__A : Optional[int]=None ,__A : Optional[int]=None ,__A : int="resnet50" ,__A : int=3 ,__A : List[Any]=32 ,__A : Tuple=3 ,__A : List[Any]=True ,__A : Tuple=True ,) -> Any:
_lowercase = parent
_lowercase = out_indices if out_indices is not None else [4]
_lowercase = stage_names
_lowercase = out_features
_lowercase = backbone
_lowercase = batch_size
_lowercase = image_size
_lowercase = num_channels
_lowercase = use_pretrained_backbone
_lowercase = is_training
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase = self.get_config()
return config, pixel_values
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)
def __UpperCAmelCase ( self : Any ,__A : Any ,__A : Dict ) -> Union[str, Any]:
_lowercase = TimmBackbone(config=__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowercase = model(__A )
self.parent.assertEqual(
result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,)
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
_lowercase = self.prepare_config_and_inputs()
_lowercase , _lowercase = config_and_inputs
_lowercase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest( ModelTesterMixin , BackboneTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
def __UpperCAmelCase ( self : str ) -> Optional[int]:
_lowercase = TimmBackboneModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A )
def __UpperCAmelCase ( self : int ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
_lowercase = 'resnet18'
_lowercase = 'microsoft/resnet-18'
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A )
_lowercase = AutoBackbone.from_pretrained(__A )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A ,out_indices=[1, 2, 3] )
_lowercase = AutoBackbone.from_pretrained(__A ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass
    @unittest.skip('TimmBackbone models don\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
    @unittest.skip('TimmBackbone models don\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : int ) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __UpperCAmelCase ( self : Any ) -> Any:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
pass
def __UpperCAmelCase ( self : Dict ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
_lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase = [*signature.parameters.keys()]
_lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__A )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = True
_lowercase = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowercase = self.all_model_classes[0]
_lowercase = model_class(__A )
model.to(__A )
_lowercase = self._prepare_for_class(__A ,__A )
_lowercase = model(**__A )
_lowercase = outputs[0][-1]
# Encoder-/Decoder-only models
_lowercase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowercase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_lowercase = copy.deepcopy(__A )
_lowercase = None
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
_lowercase = copy.deepcopy(__A )
_lowercase = False
_lowercase = model_class(__A )
model.to(__A )
model.eval()
            _lowercase = model(**__A )
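        # Usage sketch (illustrative, mirroring the timm-vs-transformers checks above;
        # loading downloads weights, so it is left commented out):
        #   backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(1, 2, 3))
        #   feature_maps = backbone(pixel_values).feature_maps  # one feature map per requested stage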
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, not is_max, scores, height),
            minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, not is_max, scores, height),
            minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height),
        )
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
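# Worked check (illustrative, not part of the original module): with four leaves the
# tree height is log2(4) = 2.0; the root maximizer sees min(3, 5) = 3 on the left
# branch and min(2, 9) = 2 on the right, so the optimal value is 3.
example_scores = [3, 5, 2, 9]
assert minimax(0, 0, True, example_scores, math.log(len(example_scores), 2)) == 3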
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
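# A minimal sketch of the lazy-import pattern this module relies on (an illustration,
# not the actual transformers._LazyModule implementation): a PEP 562 module-level
# __getattr__ can defer each submodule import until an attribute is first accessed.
#   import importlib
#   def __getattr__(name):
#       for submodule, names in _import_structure.items():
#           if name in names:
#               return getattr(importlib.import_module("." + submodule, __name__), name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")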
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowerCAmelCase_ ( unittest.TestCase ):
def _snake_case ( self ) -> Any:
_lowerCAmelCase = 10
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = [1, 2, 3, 4]
_lowerCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_lowerCAmelCase , self.block_size , 0 ) , _lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_lowerCAmelCase , self.block_size , 0 ) , _lowerCAmelCase )
def _snake_case ( self ) -> str:
_lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_lowerCAmelCase , self.block_size , 0 ) , _lowerCAmelCase )
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
_lowerCAmelCase , _lowerCAmelCase = process_story(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , [] )
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = ""
_lowerCAmelCase , _lowerCAmelCase = process_story(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , [] )
self.assertEqual(_lowerCAmelCase , [] )
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
_lowerCAmelCase , _lowerCAmelCase = process_story(_lowerCAmelCase )
_lowerCAmelCase = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = ["It was the best of times."]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = torch.tensor([1, 2, 3, 4] )
_lowerCAmelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_lowerCAmelCase , 0 ).numpy() , expected.numpy() )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_lowerCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_lowerCAmelCase , 23 ).numpy() , expected.numpy() )
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_lowerCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_lowerCAmelCase , 1 ).numpy() , expected.numpy() )
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = 101
_lowerCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_lowerCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_lowerCAmelCase = compute_token_type_ids(_lowerCAmelCase , _lowerCAmelCase )
np.testing.assert_array_equal(_lowerCAmelCase , _lowerCAmelCase )
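# Reference sketch consistent with the three build_mask tests above (an assumption
# about the implementation, not necessarily the actual utils_summarization code):
def build_mask_sketch(sequence: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    # 1 for real tokens, 0 wherever the pad token appears
    return (sequence != pad_token_id).long()
assert build_mask_sketch(torch.tensor([1, 2, 3, 4, 23, 23, 23]), 23).tolist() == [1, 1, 1, 1, 0, 0, 0]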
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
def solution() -> int:
    return sum(
        number
        for number in range(1000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
    print(solution())
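# Worked example (illustrative, not part of the original solution): 4150 is one of
# the numbers the search finds, since 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150,
# while e.g. 1000 maps to just 1 and is therefore skipped.
assert digits_fifth_powers_sum(4150) == 4150
assert digits_fifth_powers_sum(1000) == 1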
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _UpperCAmelCase( unittest.TestCase ):
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
_UpperCamelCase = {
'''do_resize''': True,
'''size''': {'''height''': 2_24, '''width''': 2_24},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
'''do_convert_rgb''': True,
}
_UpperCamelCase = os.path.join(self.tmpdirname , __a)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
json.dump(__a , __a)
def UpperCAmelCase ( self , **__a) -> List[Any]:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__a)
def UpperCAmelCase ( self , **__a) -> Any:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__a)
def UpperCAmelCase ( self , **__a) -> List[str]:
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
_UpperCamelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1)) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
processor_slow.save_pretrained(self.tmpdirname)
_UpperCamelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a)
_UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
processor_fast.save_pretrained(self.tmpdirname)
_UpperCamelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , __a)
self.assertIsInstance(processor_fast.tokenizer , __a)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , __a)
self.assertIsInstance(processor_fast.image_processor , __a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_UpperCamelCase = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''')
_UpperCamelCase = self.get_image_processor(do_normalize=__a)
_UpperCamelCase = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=__a)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , __a)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , __a)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = image_processor(__a , return_tensors='''np''')
_UpperCamelCase = processor(images=__a , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
_UpperCamelCase = '''Alexandra,T-shirt的价格是15便士。'''
_UpperCamelCase = processor(text=__a)
_UpperCamelCase = tokenizer(__a)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
_UpperCamelCase = '''Alexandra,T-shirt的价格是15便士。'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=__a , images=__a)
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
_UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCamelCase = processor.batch_decode(__a)
_UpperCamelCase = tokenizer.batch_decode(__a)
self.assertListEqual(__a , __a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
_UpperCamelCase = '''Alexandra,T-shirt的价格是15便士。'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=__a , images=__a)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # all primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
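# Worked check (illustrative, not part of the original solution): 3797 is truncatable
# from both directions, since 3797, 797, 97, 7 and 379, 37, 3 are all prime.
assert sorted(list_truncated_nums(3797)) == [3, 7, 37, 97, 379, 797, 3797]
assert all(is_prime(i) for i in list_truncated_nums(3797))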
class Node:
    def __init__(self, data: int, previous=None, next_node=None) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node
    def __str__(self) -> str:
        return f"{self.data}"
    def get_data(self) -> int:
        return self.data
    def get_next(self):
        return self.next
    def get_previous(self):
        return self.previous
class LinkedListIterator:
    def __init__(self, head) -> None:
        self.current = head
    def __iter__(self):
        return self
    def __next__(self) -> int:
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__(self) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list
    def __str__(self) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)
    def __contains__(self, value: int) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False
    def __iter__(self):
        return LinkedListIterator(self.head)
    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None
    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None
    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)
    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)
    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)
    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert
    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert
    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)
    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")
    def delete_value(self, value: int) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)
    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.previous = None
        node.next = None
    def is_empty(self) -> bool:
        return self.head is None
if __name__ == "__main__":
    import doctest
    doctest.testmod()
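# Brief usage sketch of the list above (illustrative, not part of the original file):
example_list = LinkedList()
for example_value in (1, 2, 3):
    example_list.insert(example_value)  # list is now: 1 2 3
example_list.insert_at_position(2, 9)   # list is now: 1 9 2 3
example_list.delete_value(2)            # list is now: 1 9 3
assert str(example_list) == "1 9 3"
assert 9 in example_list and example_list.get_head_data() == 1 and example_list.get_tail_data() == 3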
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester :
"""simple docstring"""
def __init__( self : Optional[Any] ,__A : Tuple ,__A : Any=99 ,__A : Any=13 ,__A : Dict=7 ,__A : List[Any]=9 ,__A : Dict=True ,__A : Any=True ,__A : Tuple=False ,__A : str=32 ,__A : int=5 ,__A : List[str]=4 ,__A : Optional[Any]=37 ,__A : int=8 ,__A : Any=0.1 ,__A : Dict=0.002 ,__A : Union[str, Any]=1 ,__A : Optional[Any]=0 ,__A : int=0 ,__A : Tuple=None ,__A : str=None ,) -> List[Any]:
_lowercase = parent
_lowercase = batch_size
_lowercase = encoder_seq_length
_lowercase = decoder_seq_length
# For common tests
_lowercase = self.decoder_seq_length
_lowercase = is_training
_lowercase = use_attention_mask
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = d_ff
_lowercase = relative_attention_num_buckets
_lowercase = dropout_rate
_lowercase = initializer_factor
_lowercase = eos_token_id
_lowercase = pad_token_id
_lowercase = decoder_start_token_id
_lowercase = None
_lowercase = decoder_layers
def __UpperCAmelCase ( self : Dict ) -> Dict:
return TaConfig.from_pretrained('google/umt5-base' )
def __UpperCAmelCase ( self : Optional[int] ,__A : Optional[int] ,__A : int ,__A : str ,__A : List[str]=None ,__A : List[str]=None ,__A : Any=None ,__A : List[Any]=None ,__A : str=None ,) -> Tuple:
if attention_mask is None:
_lowercase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_lowercase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_lowercase = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=__A )
if decoder_head_mask is None:
_lowercase = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=__A )
if cross_attn_head_mask is None:
_lowercase = torch.ones(
config.num_decoder_layers ,config.num_attention_heads ,device=__A )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
_lowercase = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
_lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
_lowercase = input_ids.clamp(self.pad_token_id + 1 )
_lowercase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_lowercase = self.get_config()
_lowercase = config.num_attention_heads
_lowercase = self.prepare_inputs_dict(__A ,__A ,__A )
return config, input_dict
def __UpperCAmelCase ( self : Dict ) -> str:
_lowercase , _lowercase = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self : Dict ) -> Tuple:
return TaConfig(
vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __UpperCAmelCase ( self : Dict ) -> Any:
return TaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __UpperCAmelCase ( self : Union[str, Any] ,__A : List[str] ,__A : Dict ,__A : List[str] ,__A : List[Any] ,__A : Tuple ,__A : int ,) -> Tuple:
_lowercase = UMTaModel(config=__A )
model.to(__A )
model.eval()
_lowercase = model(
input_ids=__A ,decoder_input_ids=__A ,attention_mask=__A ,decoder_attention_mask=__A ,)
_lowercase = model(input_ids=__A ,decoder_input_ids=__A )
_lowercase = result.last_hidden_state
_lowercase = result.past_key_values
_lowercase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__A ) ,config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) ,4 )
def __UpperCAmelCase ( self : List[Any] ,__A : Tuple ,__A : int ,__A : Any ,__A : Tuple ,__A : Any ,__A : Optional[int] ,) -> List[str]:
_lowercase = UMTaModel(config=__A ).get_decoder().to(__A ).eval()
# first forward pass
_lowercase = model(__A ,use_cache=__A )
_lowercase = model(__A )
_lowercase = model(__A ,use_cache=__A )
self.parent.assertTrue(len(__A ) == len(__A ) )
self.parent.assertTrue(len(__A ) == len(__A ) + 1 )
_lowercase , _lowercase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowercase = ids_tensor((self.batch_size, 1) ,config.vocab_size )
# append to next input_ids and
_lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
_lowercase = model(__A )['last_hidden_state']
_lowercase = model(__A ,past_key_values=__A )['last_hidden_state']
# select random slice
_lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_lowercase = output_from_no_past[:, -1, random_slice_idx].detach()
_lowercase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A ,__A ,atol=1e-3 ) )
def __UpperCAmelCase ( self : Any ,__A : List[str] ,__A : List[str] ,) -> int:
_lowercase = UMTaModel(config=__A ).to(__A ).half().eval()
_lowercase = model(**__A )['last_hidden_state']
self.parent.assertFalse(torch.isnan(__A ).any().item() )
@require_torch
class UMTaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
# The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
_lowercase = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def __UpperCAmelCase ( self : int ) -> str:
_lowercase = self.model_tester.prepare_config_and_inputs()
_lowercase = UMTaModel(config_and_inputs[0] ).to(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__A ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F"""{tmpdirname}/t5_test.onnx""" ,export_params=__A ,opset_version=9 ,input_names=['input_ids', 'decoder_input_ids'] ,)
@unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
def __UpperCAmelCase ( self : List[Any] ) -> str:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__A )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
_lowercase = self.model_tester.prepare_config_and_inputs()
_lowercase = config_and_inputs[0]
_lowercase = UMTaForConditionalGeneration(__A ).eval()
model.to(__A )
_lowercase = {
'head_mask': torch.zeros(config.num_layers ,config.num_heads ,device=__A ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
}
for attn_name, (name, mask) in zip(__A ,head_masking.items() ):
_lowercase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_lowercase = torch.ones(
config.num_decoder_layers ,config.num_heads ,device=__A )
_lowercase = model.generate(
config_and_inputs[1]['input_ids'] ,num_beams=1 ,max_length=3 ,output_attentions=__A ,return_dict_in_generate=__A ,**__A ,)
# We check the state of decoder_attentions and cross_attentions just from the last step
_lowercase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def __UpperCAmelCase ( self : str ) -> List[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def __UpperCAmelCase ( self : int ) -> List[str]:
_lowercase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=__A ).to(__A )
_lowercase = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=__A ,legacy=__A )
_lowercase = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
_lowercase = tokenizer(__A ,return_tensors='pt' ,padding=__A ).input_ids
# fmt: off
_lowercase = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__A ,__A )
_lowercase = model.generate(input_ids.to(__A ) )
_lowercase = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
_lowercase = tokenizer.batch_decode(__A )
        self.assertEqual(__A ,__A )
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester( unittest.TestCase ):
def __init__( self :Tuple , __snake_case :str , __snake_case :List[Any]=7 , __snake_case :Optional[int]=3 , __snake_case :List[str]=18 , __snake_case :Optional[int]=30 , __snake_case :str=4_00 , __snake_case :Dict=True , __snake_case :Optional[Any]=None , __snake_case :List[Any]=True , ):
'''simple docstring'''
__magic_name__ : Tuple =size if size is not None else {"""height""": 18, """width""": 18}
__magic_name__ : List[Any] =parent
__magic_name__ : Any =batch_size
__magic_name__ : str =num_channels
__magic_name__ : List[str] =image_size
__magic_name__ : str =min_resolution
__magic_name__ : Union[str, Any] =max_resolution
__magic_name__ : Tuple =do_resize
__magic_name__ : Optional[Any] =size
__magic_name__ : Dict =apply_ocr
def A__ ( self :Any ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def A__ ( self :Optional[int] ):
'''simple docstring'''
__magic_name__ : Dict =LayoutLMvaImageProcessingTester(self )
@property
def A__ ( self :Dict ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Tuple =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , """do_resize""" ) )
self.assertTrue(hasattr(__snake_case , """size""" ) )
self.assertTrue(hasattr(__snake_case , """apply_ocr""" ) )
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : Dict =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
__magic_name__ : Tuple =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def A__ ( self :str ):
'''simple docstring'''
pass
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : List[str] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
__magic_name__ : Optional[Any] =image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __snake_case )
self.assertIsInstance(encoding.boxes , __snake_case )
# Test batched
__magic_name__ : Optional[int] =image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
__magic_name__ : str =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ : Dict =image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
__magic_name__ : Optional[Any] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ : Union[str, Any] =image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : int =LayoutLMvaImageProcessor()
from datasets import load_dataset
__magic_name__ : Union[str, Any] =load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
__magic_name__ : Dict =Image.open(ds[0]["""file"""] ).convert("""RGB""" )
__magic_name__ : str =image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__magic_name__ : Tuple =[["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
__magic_name__ : Any =[[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 
6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __snake_case )
self.assertListEqual(encoding.boxes , __snake_case )
# with apply_OCR = False
__magic_name__ : Dict =LayoutLMvaImageProcessor(apply_ocr=__snake_case )
__magic_name__ : Union[str, Any] =image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='utf-8' ,check=__A ,)
assert hasattr(self ,'env' )
def __UpperCAmelCase ( self : str ,__A : Tuple ) -> int:
# configuration for running training on smdistributed Model Parallel
_lowercase = {
'enabled': True,
'processes_per_host': 8,
}
_lowercase = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
_lowercase = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
_lowercase = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,instance_count=__A ,instance_type=self.instance_type ,debugger_hook_config=__A ,hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 500,
} ,metric_definitions=self.env.metric_definitions ,distribution=__A ,py_version='py36' ,)
def __UpperCAmelCase ( self : List[Any] ,__A : Any ) -> Optional[Any]:
TrainingJobAnalytics(__A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __UpperCAmelCase ( self : Tuple ,__A : Union[str, Any] ) -> Optional[Any]:
# create estimator
_lowercase = self.create_estimator(__A )
# run training
estimator.fit()
# result dataframe
_lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
_lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowercase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" ,'w' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,__A )
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester :
def __init__( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any]=12 , lowerCAmelCase_ : str=7 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Dict=99 , lowerCAmelCase_ : Tuple=32 , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : List[str]=37 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : List[Any]=5_12 , lowerCAmelCase_ : Dict=0.0_2 , lowerCAmelCase_ : str=0 , lowerCAmelCase_ : Dict=None , ) -> Optional[int]:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = projection_dim
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = dropout
_a = attention_dropout
_a = max_position_embeddings
_a = initializer_range
_a = scope
_a = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # keep a random prefix of each row attended to and mask out the rest
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason='Blip does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 22 |
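# A minimal driver sketch (not part of the test suite above) showing how the tester
# gets exercised by hand; instantiating a bare unittest.TestCase as `parent` is an
# illustrative shortcut, not how the mixin-based suite actually wires it up.
tester = BlipTextModelTester(parent=unittest.TestCase())
config, input_ids, input_mask = tester.prepare_config_and_inputs()
tester.create_and_check_model(config, input_ids, input_mask)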
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE_ : int = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] ,__A : List[Any]=5_0265 ,__A : str=512 ,__A : Optional[int]=8 ,__A : Any=2048 ,__A : Tuple=16 ,__A : str=8 ,__A : int=2048 ,__A : List[str]=16 ,__A : Optional[int]=0.0 ,__A : Any=0.0 ,__A : int=True ,__A : List[Any]=True ,__A : Tuple="gelu" ,__A : Any=512 ,__A : Dict=0.1 ,__A : Tuple=0.0 ,__A : int=0.0 ,__A : int=0.02 ,__A : Dict=1 ,__A : str=False ,__A : Dict=0 ,__A : Union[str, Any]=1 ,__A : Optional[int]=2 ,__A : List[str]=2 ,**__A : Tuple ,) -> Tuple:
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = d_model
_lowercase = encoder_ffn_dim
_lowercase = encoder_layers
_lowercase = encoder_attention_heads
_lowercase = decoder_ffn_dim
_lowercase = decoder_layers
_lowercase = decoder_attention_heads
_lowercase = dropout
_lowercase = attention_dropout
_lowercase = activation_dropout
_lowercase = activation_function
_lowercase = init_std
_lowercase = encoder_layerdrop
_lowercase = decoder_layerdrop
_lowercase = use_cache
_lowercase = encoder_layers
_lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,is_encoder_decoder=__A ,decoder_start_token_id=__A ,forced_eos_token_id=__A ,**__A ,)
class BlenderbotSmallOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='inputs')
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f"past_key_values.{i}.value"] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f"present.{i}.value"] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['decoder_attention_mask'] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1 )
            common_inputs['past_key_values'] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
            common_inputs['past_key_values'] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            ) | 67 | 0 |
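# Illustrative driver for the ONNX config above (not part of the original file): it
# builds the dummy inputs the ONNX exporter would feed the model. The checkpoint name
# is a real one; wiring it to this config class is an assumption of this sketch.
from transformers import AutoTokenizer, BlenderbotSmallConfig
from transformers.file_utils import TensorType

tokenizer = AutoTokenizer.from_pretrained('facebook/blenderbot_small-90M')
onnx_config = BlenderbotSmallOnnxConfig(BlenderbotSmallConfig(), task='default')
dummy_inputs = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)
print(sorted(dummy_inputs.keys()))  # decoder_* tensors appear alongside the encoder ones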
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
snake_case__ : int = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
snake_case__ : Dict = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
snake_case__ : Dict = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Computes F1 score and Exact Match for MultiRC predictions, grouped per question."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average='macro')
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred['prediction'] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg='macro')
        elif self.config_name == "record":
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 23 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type='numpy').images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type='numpy', return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 | 67 | 0 |
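# Minimal end-user sketch of the pipeline exercised above. This assumes a diffusers
# version that still ships KarrasVePipeline; output_type='pil' is the default PIL path.
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained('google/ncsnpp-celebahq-256')
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
image = pipe(num_inference_steps=20, generator=torch.manual_seed(0), output_type='pil').images[0]
image.save('sample.png')  # 256x256 unconditional sample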
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
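# Worked example for format_time (hypothetical values, easy to verify by hand):
# 3661 s = 1 h 1 min 1 s; 125 s = 2 min 5 s, and the hour field is dropped when zero.
assert format_time(3661) == "1:01:01"
assert format_time(125) == "02:05"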
def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value, force_update=False, comment=None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()
    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['Training Loss']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('Validation Loss')
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)
    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
            values['Step'] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values['Training Loss'] = log['loss']
                    break
            if self.first_column == "Epoch":
                values['Epoch'] = int(state.epoch)
            else:
                values['Step'] = state.global_step
            metric_key_prefix = 'eval'
            for k in metrics:
                if k.endswith('_loss'):
                    metric_key_prefix = re.sub(r'\_loss$', '', k)
            _ = metrics.pop('total_flos', None)
            _ = metrics.pop('epoch', None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values['Validation Loss'] = v
                else:
                    splits = k.split('_')
                    name = ' '.join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
| 24 |
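# Tiny sanity check for html_progress_bar above (hypothetical values): the returned
# string is raw HTML handed to IPython.display, so we just probe for the progress tag.
snippet = html_progress_bar(value=5, total=10, prefix='Train', label='5/10', width=300)
assert "<progress value='5' max='10'" in snippet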
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in `s` where `pattern` starts, found by brute-force scanning."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC""")) | 67 | 0 |
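# Complexity note with a worked worst case (illustrative input, not from the original):
# the naive scan does O(n * m) character comparisons. With len(s)=8 and len(pattern)=4,
# that is up to (8 - 4 + 1) * 4 = 20 comparisons, every window failing at j == 3.
print(naive_pattern_search('AAAAAAAA', 'AAAB'))  # []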
import datasets
from .evaluate import evaluate
a_ = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
a_ = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
a_ = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score | 25 |
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix `a` and nonzero vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests() | 67 | 0 |
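# Restating the math behind rayleigh_quotient above (no new claims, LaTeX in comments):
#
#     R(M, v) = \frac{v^{*} M v}{v^{*} v}
#
# For Hermitian M the quotient is real and bounded by the extreme eigenvalues,
# \lambda_min(M) <= R(M, v) <= \lambda_max(M). Worked check of the second assert:
# with A = [[1, 2, 4], [2, 3, -1], [4, -1, 1]] and v = (1, 2, 3)^T we get
# A v = (17, 5, 5)^T, so v^T A v = 42 and v^T v = 14, giving R = 42 / 14 = 3.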
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={'help': 'Which column contains the label'})
    train_file: str = field(default=None, metadata={'help': 'The path of the training file'})
    dev_file: Optional[str] = field(default=None, metadata={'help': 'The path of the development file'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'The path of the test file'})
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)
    return results
if __name__ == "__main__":
main()
| 26 |
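# Hypothetical invocation sketch for the script above: HfArgumentParser maps every
# dataclass field to a --flag. The paths and model name are placeholders, not values
# taken from this file.
import sys

sys.argv = [
    "run_tf_text_classification.py",
    "--model_name_or_path", "bert-base-uncased",
    "--train_file", "train.csv",
    "--dev_file", "dev.csv",
    "--label_column_id", "0",
    "--output_dir", "./out",
    "--do_train",
]
main()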
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
# prepare image and target
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'image_id': 3_9769, 'annotations': target}
# encode them
_lowercase = DetaImageProcessor()
_lowercase = image_processing(images=__A ,annotations=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# prepare image, target and masks_path
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
_lowercase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_lowercase = DetaImageProcessor(format='coco_panoptic' )
_lowercase = image_processing(images=__A ,annotations=__A ,masks_path=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify masks
_lowercase = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,__A )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) ) | 67 | 0 |
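The box targets checked above are in normalized (center_x, center_y, width, height) form. A minimal sketch of the conversion the processor applies to COCO's absolute (x, y, width, height) boxes; the function name and arguments here are illustrative, not taken from the snippet:
def coco_box_to_normalized_cxcywh(box, image_width, image_height):
    # COCO stores the top-left corner plus width/height, in pixels
    x, y, w, h = box
    return (
        (x + w / 2) / image_width,   # center x, as a fraction of image width
        (y + h / 2) / image_height,  # center y, as a fraction of image height
        w / image_width,
        h / image_height,
    )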
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ) -> Optional[int]:
"""simple docstring"""
model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
params = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__A : str = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ) -> str:
"""simple docstring"""
if metric == "rouge2":
exp = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
exp = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
exp = '{val_avg_em:.4f}-{step_count}'
else:
raise NotImplementedError(
F"seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this"
' function.' )
checkpoint_callback = ModelCheckpoint(
dirpath=output_dir , filename=exp , monitor=F"val_{metric}" , mode='max' , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def get_early_stopping_callback( metric , patience ) -> Tuple:
"""simple docstring"""
return EarlyStopping(
monitor=F"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
class lowerCamelCase( pl.Callback ):
'''simple docstring'''
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ):
_A = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(snake_case_ )
@rank_zero_only
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=True ):
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****" )
_A = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
_A = Path(pl_module.hparams.output_dir )
if type_path == "test":
_A = od / 'test_results.txt'
_A = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_A = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
_A = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=snake_case_ )
generations_file.parent.mkdir(exist_ok=snake_case_ )
with open(snake_case_ , 'a+' ) as writer:
for key in sorted(snake_case_ ):
if key in ["log", "progress_bar", "preds"]:
continue
_A = metrics[key]
if isinstance(snake_case_ , torch.Tensor ):
_A = val.item()
_A = F"{key}: {val:.6f}\n"
writer.write(snake_case_ )
if not save_generations:
return
if "preds" in metrics:
_A = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(snake_case_ )
@rank_zero_only
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ):
try:
_A = pl_module.model.model.num_parameters()
except AttributeError:
_A = pl_module.model.num_parameters()
_A = count_trainable_parameters(snake_case_ )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(snake_case_ , snake_case_ , 'test' )
@rank_zero_only
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 27 |
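A quick sanity check of the count_trainable_parameters helper defined at the top of the snippet above (the model here is illustrative):
model = torch.nn.Linear(10, 2)  # 2 * 10 weights + 2 biases = 22 trainable parameters
print(count_trainable_parameters(model))  # 22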
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
snake_case = False
snake_case = False
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Namespace ) -> Tuple:
return TrainCommand(snake_case__ )
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def __UpperCAmelCase ( __A : ArgumentParser ) -> List[Any]:
_lowercase = parser.add_parser('train' ,help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' ,type=__A ,required=__A ,help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' ,)
train_parser.add_argument(
'--column_label' ,type=__A ,default=0 ,help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' ,type=__A ,default=1 ,help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' ,type=__A ,default=2 ,help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' ,action='store_true' ,help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' ,type=__A ,default='' ,help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' ,type=__A ,default=0.1 ,help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' ,)
train_parser.add_argument('--output' ,type=__A ,default='./' ,help='path to save the trained model.' )
train_parser.add_argument(
'--task' ,type=__A ,default='text_classification' ,help='Task to train the model on.' )
train_parser.add_argument(
'--model' ,type=__A ,default='bert-base-uncased' ,help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' ,type=__A ,default=32 ,help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' ,type=__A ,default=64 ,help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' ,type=__A ,default=3e-5 ,help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' ,type=__A ,default=1e-08 ,help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self : Optional[Any] ,__A : Namespace ) -> Tuple:
_lowercase = logging.get_logger('transformers-cli/training' )
_lowercase = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output ,exist_ok=__A )
_lowercase = args.output
_lowercase = args.column_label
_lowercase = args.column_text
_lowercase = args.column_id
self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
_lowercase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"""Loading dataset from {args.train_data}""" )
_lowercase = Processor.create_from_csv(
args.train_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
_lowercase = None
if args.validation_data:
self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
_lowercase = Processor.create_from_csv(
args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
_lowercase = args.validation_split
_lowercase = args.train_batch_size
_lowercase = args.valid_batch_size
_lowercase = args.learning_rate
_lowercase = args.adam_epsilon
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
raise NotImplementedError
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
self.pipeline.fit(
self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,)
# Save trained pipeline
self.pipeline.save_pretrained(self.output ) | 67 | 0 |
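Assembled from the arguments registered above, a typical invocation of this subcommand would look roughly like the following (paths and model name are illustrative): transformers-cli train --train_data ./data/train.csv --column_label 0 --column_text 1 --model bert-base-uncased --train_batch_size 32 --output ./trained_model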
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 28 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any ) -> str:
_lowercase = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
_lowercase = 1024
_lowercase = 4096
_lowercase = 24
_lowercase = 16
_lowercase = [5, 11, 17, 23]
_lowercase = [256, 512, 1024, 1024]
_lowercase = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = [256, 512, 768, 768]
_lowercase = 150
_lowercase = 16
_lowercase = (1, 384, 384)
_lowercase = False
_lowercase = 'project'
if "ade" in checkpoint_url:
_lowercase = True
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = 150
_lowercase = 16
_lowercase = 'huggingface/label-files'
_lowercase = 'ade20k-id2label.json'
idalabel = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='dataset' ) ) , 'r' ) )
idalabel = {int(k ): v for k, v in idalabel.items()}
_lowercase = idalabel
_lowercase = {v: k for k, v in idalabel.items()}
_lowercase = [1, 150, 480, 480]
return config, expected_shape
def remove_ignore_keys_( state_dict ) -> str:
ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(k , None )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[Any] ) -> Any:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowercase = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
_lowercase = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
_lowercase = name.replace('patch_embed' , '' )
if "pos_embed" in name:
_lowercase = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
_lowercase = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
_lowercase = name.replace('proj' , 'projection' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
_lowercase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
_lowercase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
_lowercase = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
_lowercase = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
_lowercase = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
_lowercase = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
_lowercase = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
_lowercase = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
_lowercase = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
layer_idx = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
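# e.g. "refinenet4" maps to "fusion_stage.layers.0" and "refinenet1" to "fusion_stage.layers.3"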
_lowercase = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
_lowercase = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_lowercase = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_lowercase = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_lowercase = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_lowercase = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowercase = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_lowercase = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_lowercase = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_lowercase = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_lowercase = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_lowercase = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_lowercase = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_lowercase = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_lowercase = name.replace('bn' , 'batch_norm' )
if "head" in name:
_lowercase = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_lowercase = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_lowercase = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
_lowercase = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
_lowercase = name.replace('..' , '.' )
if "stem.conv" in name:
_lowercase = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
_lowercase = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
_lowercase = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
_lowercase = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
_lowercase = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
_lowercase = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def read_in_q_k_v( state_dict , config ) -> Dict:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
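# the fused qkv weight has shape (3 * hidden_size, hidden_size); the three row blocks are q, k, v in order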
_lowercase = in_proj_weight[: config.hidden_size, :]
_lowercase = in_proj_bias[: config.hidden_size]
_lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase = in_proj_weight[
-config.hidden_size :, :
]
_lowercase = in_proj_bias[-config.hidden_size :]
def prepare_img( ) -> Tuple:
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :List[Any] , snake_case__ :str , snake_case__ :Any , snake_case__ :List[str] ) -> str:
_lowercase , _lowercase = get_dpt_config(snake_case__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_lowercase = torch.load(snake_case__ , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(snake_case__ )
# rename keys
for key in state_dict.copy().keys():
_lowercase = state_dict.pop(snake_case__ )
_lowercase = val
# read in qkv matrices
read_in_q_k_v(snake_case__ , snake_case__ )
# load HuggingFace model
_lowercase = DPTForSemanticSegmentation(snake_case__ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# Check outputs on an image
_lowercase = 480 if 'ade' in checkpoint_url else 384
_lowercase = DPTImageProcessor(size=snake_case__ )
_lowercase = prepare_img()
_lowercase = image_processor(snake_case__ , return_tensors='pt' )
# forward pass
_lowercase = model(**snake_case__ ).logits if 'ade' in checkpoint_url else model(**snake_case__ ).predicted_depth
if show_prediction:
prediction = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=snake_case__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
snake_case = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
) | 67 | 0 |
"""simple docstring"""
from math import ceil
def assert_device_map( device_map ,num_blocks ):
blocks = list(range(0 ,num_blocks ) )
device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
duplicate_blocks = []
for i in device_map_blocks:
if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(i )
# Missing blocks
missing_blocks = [i for i in blocks if i not in device_map_blocks]
extra_blocks = [i for i in device_map_blocks if i not in blocks]
if len(duplicate_blocks ) != 0:
raise ValueError(
'''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
''' These attention blocks were specified more than once: ''' + str(duplicate_blocks ) )
if len(missing_blocks ) != 0:
raise ValueError(
'''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
'''blocks to a device on the device_map: ''' + str(missing_blocks ) )
if len(extra_blocks ) != 0:
raise ValueError(
'''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
+ str(extra_blocks ) )
def get_device_map( n_layers ,devices ):
layers = list(range(n_layers ) )
n_blocks = int(ceil(n_layers / len(devices ) ) )
layers_list = [layers[i : i + n_blocks] for i in range(0 ,n_layers ,n_blocks )]
return dict(zip(devices ,layers_list ) )
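A worked example of the balanced split get_device_map produces:
print(get_device_map(8, [0, 1, 2]))
# -> {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7]}: eight layers in contiguous blocks of ceil(8 / 3) = 3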
| 29 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 67 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
'''simple docstring'''
w0 = tau * frequency / samplerate
_sin = sin(w0 )
_cos = cos(w0 )
alpha = _sin / (2 * q_factor)
b0 = (1 - _cos) / 2
b1 = 1 - _cos
a0 = 1 + alpha
a1 = -2 * _cos
a2 = 1 - alpha
filt = IIRFilter(2 )
filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
return filt
def make_highpass( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
'''simple docstring'''
w0 = tau * frequency / samplerate
_sin = sin(w0 )
_cos = cos(w0 )
alpha = _sin / (2 * q_factor)
b0 = (1 + _cos) / 2
b1 = -1 - _cos
a0 = 1 + alpha
a1 = -2 * _cos
a2 = 1 - alpha
filt = IIRFilter(2 )
filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
return filt
def make_bandpass( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
'''simple docstring'''
w0 = tau * frequency / samplerate
_sin = sin(w0 )
_cos = cos(w0 )
alpha = _sin / (2 * q_factor)
b0 = _sin / 2
b1 = 0
b2 = -b0
a0 = 1 + alpha
a1 = -2 * _cos
a2 = 1 - alpha
filt = IIRFilter(2 )
filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
return filt
def make_allpass( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
'''simple docstring'''
w0 = tau * frequency / samplerate
_sin = sin(w0 )
_cos = cos(w0 )
alpha = _sin / (2 * q_factor)
b0 = 1 - alpha
b1 = -2 * _cos
b2 = 1 + alpha
filt = IIRFilter(2 )
filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
return filt
def make_peak( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
'''simple docstring'''
w0 = tau * frequency / samplerate
_sin = sin(w0 )
_cos = cos(w0 )
alpha = _sin / (2 * q_factor)
big_a = 10 ** (gain_db / 40)
b0 = 1 + alpha * big_a
b1 = -2 * _cos
b2 = 1 - alpha * big_a
a0 = 1 + alpha / big_a
a1 = -2 * _cos
a2 = 1 - alpha / big_a
filt = IIRFilter(2 )
filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
return filt
def make_lowshelf( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
'''simple docstring'''
w0 = tau * frequency / samplerate
_sin = sin(w0 )
_cos = cos(w0 )
alpha = _sin / (2 * q_factor)
big_a = 10 ** (gain_db / 40)
pmc = (big_a + 1) - (big_a - 1) * _cos
ppmc = (big_a + 1) + (big_a - 1) * _cos
mpc = (big_a - 1) - (big_a + 1) * _cos
pmpc = (big_a - 1) + (big_a + 1) * _cos
aaa = 2 * sqrt(big_a ) * alpha
b0 = big_a * (pmc + aaa)
b1 = 2 * big_a * mpc
b2 = big_a * (pmc - aaa)
a0 = ppmc + aaa
a1 = -2 * pmpc
a2 = ppmc - aaa
filt = IIRFilter(2 )
filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
return filt
def make_highshelf( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
'''simple docstring'''
w0 = tau * frequency / samplerate
_sin = sin(w0 )
_cos = cos(w0 )
alpha = _sin / (2 * q_factor)
big_a = 10 ** (gain_db / 40)
pmc = (big_a + 1) - (big_a - 1) * _cos
ppmc = (big_a + 1) + (big_a - 1) * _cos
mpc = (big_a - 1) - (big_a + 1) * _cos
pmpc = (big_a - 1) + (big_a + 1) * _cos
aaa = 2 * sqrt(big_a ) * alpha
b0 = big_a * (ppmc + aaa)
b1 = -2 * big_a * pmpc
b2 = big_a * (ppmc - aaa)
a0 = pmc + aaa
a1 = 2 * mpc
a2 = pmc - aaa
filt = IIRFilter(2 )
filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
return filt | 30 |
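A short usage sketch for the biquad factories above. It assumes IIRFilter exposes a per-sample process() method, as in the audio_filters.iir_filter module this file imports from:
samplerate = 48_000
lowpass = make_lowpass(1_000, samplerate)
# feed a 5 kHz tone through the 1 kHz low-pass; the steady-state output should be strongly attenuated
tone = [sin(tau * 5_000 * n / samplerate) for n in range(2_000)]
filtered = [lowpass.process(sample) for sample in tone]  # process() is an assumption about IIRFilter
print(max(abs(s) for s in filtered[1_000:]))  # well below 1.0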
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline | 67 | 0 |
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
lowerCamelCase__ : Tuple = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman :
'''simple docstring'''
def __init__( self : str , group : int = 14 ):
if group not in primes:
raise ValueError('Unsupported Group' )
self.prime = primes[group]['prime']
self.generator = primes[group]['generator']
self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )
def lowerCAmelCase_ ( self : List[Any] ):
return hex(self.__private_key )[2:]
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = pow(self.generator , self.__private_key , self.prime )
return hex(SCREAMING_SNAKE_CASE_ )[2:]
def is_valid_public_key( self : int , key : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
)
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_ = int(_lowerCAmelCase , base=16 )
if not self.is_valid_public_key(SCREAMING_SNAKE_CASE_ ):
raise ValueError('Invalid public key' )
SCREAMING_SNAKE_CASE_ = pow(SCREAMING_SNAKE_CASE_ , self.__private_key , self.prime )
return shaaaa(str(SCREAMING_SNAKE_CASE_ ).encode() ).hexdigest()
@staticmethod
def is_valid_public_key_static( remote_public_key_str : int , prime : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
)
@staticmethod
def lowerCAmelCase_ ( local_private_key_str : str , remote_public_key_str : str , group : int = 14 ):
local_private_key = int(local_private_key_str , base=16 )
remote_public_key = int(remote_public_key_str , base=16 )
prime = primes[group]['prime']
if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
raise ValueError('Invalid public key' )
shared_key = pow(remote_public_key , local_private_key , prime )
return shaaaa(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 |
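A minimal sketch of the exchange the class above implements, using the group-14 parameters from the primes table directly; the secrets below are fixed only for illustration (real code must draw them from urandom):
prime = primes[14]['prime']
generator = primes[14]['generator']
alice_secret = 0x1234_5678_9ABC_DEF0
bob_secret = 0x0FED_CBA9_8765_4321
alice_public = pow(generator, alice_secret, prime)
bob_public = pow(generator, bob_secret, prime)
# both sides arrive at the same shared value without ever sending their secrets
assert pow(bob_public, alice_secret, prime) == pow(alice_public, bob_secret, prime)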
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = '''transfo-xl'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''mems''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[Any] ,__A : Union[str, Any]=26_7735 ,__A : List[Any]=[2_0000, 4_0000, 20_0000] ,__A : Dict=1024 ,__A : str=1024 ,__A : Dict=16 ,__A : int=64 ,__A : Dict=4096 ,__A : List[Any]=4 ,__A : Optional[int]=False ,__A : Union[str, Any]=18 ,__A : Tuple=1600 ,__A : str=1000 ,__A : Dict=True ,__A : Dict=True ,__A : int=0 ,__A : Optional[int]=-1 ,__A : int=True ,__A : List[str]=0.1 ,__A : Optional[int]=0.0 ,__A : str=True ,__A : Tuple="normal" ,__A : Union[str, Any]=0.01 ,__A : Tuple=0.01 ,__A : Any=0.02 ,__A : Union[str, Any]=1e-5 ,__A : List[Any]=0 ,**__A : str ,) -> List[Any]:
_lowercase = vocab_size
_lowercase = []
self.cutoffs.extend(__A )
if proj_share_all_but_first:
_lowercase = [False] + [True] * len(self.cutoffs )
else:
_lowercase = [False] + [False] * len(self.cutoffs )
_lowercase = d_model
_lowercase = d_embed
_lowercase = d_head
_lowercase = d_inner
_lowercase = div_val
_lowercase = pre_lnorm
_lowercase = n_layer
_lowercase = n_head
_lowercase = mem_len
_lowercase = same_length
_lowercase = attn_type
_lowercase = clamp_len
_lowercase = sample_softmax
_lowercase = adaptive
_lowercase = dropout
_lowercase = dropatt
_lowercase = untie_r
_lowercase = init
_lowercase = init_range
_lowercase = proj_init_std
_lowercase = init_std
_lowercase = layer_norm_epsilon
super().__init__(eos_token_id=__A ,**__A )
@property
def __UpperCAmelCase ( self : str ) -> Optional[int]:
# Message copied from Transformer-XL documentation
logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def __UpperCAmelCase ( self : Any ,__A : Dict ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) | 67 | 0 |
from __future__ import annotations
def simple_interest( principal : float , daily_interest_rate : float , days_between_payments : float ) -> float:
"""simple docstring"""
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def compound_interest( principal : float , nominal_annual_interest_rate_percentage : float , number_of_compounding_periods : float , ) -> float:
"""simple docstring"""
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def apr_interest( principal : float , nominal_annual_percentage_rate : float , number_of_years : float , ) -> float:
"""simple docstring"""
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 |
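A quick numeric check of the three helpers above (values are illustrative):
print(simple_interest(10_000, 0.0005, 10))    # 10_000 * 0.0005 * 10 = 50.0
print(compound_interest(10_000, 0.0005, 10))  # 10_000 * (1.0005 ** 10 - 1) ~= 50.11
print(apr_interest(10_000, 0.1825, 1))        # one year at 18.25% APR, compounded daily, ~= 2001.6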
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''dpr'''
def __init__( self : int ,__A : Union[str, Any]=3_0522 ,__A : Optional[int]=768 ,__A : int=12 ,__A : List[Any]=12 ,__A : Optional[Any]=3072 ,__A : Union[str, Any]="gelu" ,__A : Union[str, Any]=0.1 ,__A : List[Any]=0.1 ,__A : str=512 ,__A : List[str]=2 ,__A : Tuple=0.02 ,__A : Tuple=1e-12 ,__A : List[Any]=0 ,__A : List[str]="absolute" ,__A : int = 0 ,**__A : int ,) -> Tuple:
super().__init__(pad_token_id=__A ,**__A )
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = hidden_act
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = initializer_range
_lowercase = layer_norm_eps
_lowercase = projection_dim
_lowercase = position_embedding_type | 67 | 0 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
lowerCamelCase__ : Dict = logging.getLogger(__name__)
lowerCamelCase__ : Union[str, Any] = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
lowerCamelCase__ : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __magic_name__ :
'''simple docstring'''
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={
'help': (
'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
)
} ,)
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(snake_case_ )} ,)
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} ,)
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,)
__lowercase : bool = field(
default=snake_case_ ,metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} ,)
__lowercase : str = field(
default='main' ,metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} ,)
__lowercase : bool = field(
default=snake_case_ ,metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} ,)
def SCREAMING_SNAKE_CASE__ ( self:str ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )
@dataclass
class __magic_name__ :
'''simple docstring'''
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__lowercase : Optional[str] = field(default=snake_case_ ,metadata={'help': 'The input training data file (a text file).'} )
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} ,)
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} ,)
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} ,)
__lowercase : bool = field(
default=snake_case_ ,metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__lowercase : Optional[int] = field(
default=5 ,metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} ,)
__lowercase : Optional[int] = field(
default=snake_case_ ,metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated. Default to the max input length of the model.'
)
} ,)
__lowercase : Optional[int] = field(
default=snake_case_ ,metadata={'help': 'The number of processes to use for the preprocessing.'} ,)
__lowercase : float = field(
default=0.15 ,metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__lowercase : bool = field(
default=snake_case_ ,metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} ,)
def SCREAMING_SNAKE_CASE__ ( self:str ):
if self.train_file is not None:
snake_case__ = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
snake_case__ = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
snake_case__ = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
snake_case__ = {c: dataset[c] for c in dataset.column_names}
snake_case__ = refs
return Dataset.from_dict(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case__ , snake_case__ , snake_case__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case__ , snake_case__ , snake_case__ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
snake_case__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowerCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
snake_case__ = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
snake_case__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
snake_case__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
snake_case__ = {}
if data_args.train_file is not None:
snake_case__ = data_args.train_file
if data_args.validation_file is not None:
snake_case__ = data_args.validation_file
snake_case__ = data_args.train_file.split('''.''' )[-1]
if extension == "txt":
snake_case__ = '''text'''
snake_case__ = load_dataset(__lowerCAmelCase , data_files=__lowerCAmelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case__ = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
snake_case__ = AutoConfig.from_pretrained(model_args.config_name , **__lowerCAmelCase )
elif model_args.model_name_or_path:
snake_case__ = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowerCAmelCase )
else:
snake_case__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
snake_case__ = {
'''cache_dir''': model_args.cache_dir,
'''use_fast''': model_args.use_fast_tokenizer,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
snake_case__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__lowerCAmelCase )
elif model_args.model_name_or_path:
snake_case__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__lowerCAmelCase )
else:
raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported by this script. '''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )
if model_args.model_name_or_path:
snake_case__ = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
snake_case__ = AutoModelForMaskedLM.from_config(__lowerCAmelCase )
model.resize_token_embeddings(len(__lowerCAmelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
snake_case__ = datasets['''train'''].column_names
else:
snake_case__ = datasets['''validation'''].column_names
snake_case__ = '''text''' if '''text''' in column_names else column_names[0]
snake_case__ = '''max_length''' if data_args.pad_to_max_length else False
def tokenize_function(__lowerCAmelCase ):
# Remove empty lines
        snake_case__ = [line for line in examples['''text'''] if len(line ) > 0 and not line.isspace()]
return tokenizer(examples['''text'''] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=data_args.max_seq_length )
snake_case__ = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
snake_case__ = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
snake_case__ = add_chinese_references(
tokenized_datasets['''validation'''] , data_args.validation_ref_file )
    # If we have ref files, we need to keep the Trainer from removing those columns
snake_case__ = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
snake_case__ = False
# Data collator
# This one will take care of randomly masking the tokens.
snake_case__ = DataCollatorForWholeWordMask(tokenizer=__lowerCAmelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
snake_case__ = Trainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=__lowerCAmelCase , data_collator=__lowerCAmelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
snake_case__ = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
snake_case__ = model_args.model_name_or_path
else:
snake_case__ = None
snake_case__ = trainer.train(resume_from_checkpoint=__lowerCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
snake_case__ = os.path.join(training_args.output_dir , '''train_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCAmelCase , '''w''' ) as writer:
logger.info('''***** Train results *****''' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
snake_case__ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
snake_case__ = trainer.evaluate()
snake_case__ = math.exp(eval_output['''eval_loss'''] )
snake_case__ = perplexity
snake_case__ = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCAmelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[str]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 33 |
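A minimal sketch of exercising the whole-word-masking collator used above on its own; the checkpoint name, texts, and probability here are illustrative assumptions, not values from the script.
from transformers import AutoTokenizer, DataCollatorForWholeWordMask

tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')  # assumed checkpoint, for illustration only
collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)
features = [tokenizer('whole word masking groups subwords'), tokenizer('of one word under a single mask decision')]
batch = collator(features)  # 'input_ids' has randomly masked whole words; 'labels' keeps the originals
print(batch['input_ids'].shape, batch['labels'].shape)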
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
snake_case = Lock()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :Union[str, Any] , snake_case__ :Tuple , snake_case__ :Any , snake_case__ :Dict , snake_case__ :Optional[int] , snake_case__ :List[str] ) -> Optional[Any]:
global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):  # 10 = length of the demo list built in main()
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
_lowercase = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
_lowercase = min(snake_case__ , snake_case__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
_lowercase = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
_lowercase = max(snake_case__ , snake_case__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Dict:
_lowercase = []
_lowercase = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
_lowercase = Pipe()
_lowercase = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
_lowercase = temp_rs
_lowercase = temp_rr
for i in range(1 , len(snake_case__ ) - 1 ):
_lowercase = Pipe()
_lowercase = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
_lowercase = temp_rs
_lowercase = temp_rr
process_array_.append(
Process(
target=snake_case__ , args=(
len(snake_case__ ) - 1,
arr[len(snake_case__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case__ ) ):
_lowercase = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
_lowercase = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*snake_case__ )
_lowercase = odd_even_transposition(snake_case__ )
print('Sorted List\n' )
print(*snake_case__ )
if __name__ == "__main__":
main() | 67 | 0 |
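For reference, a minimal single-process sketch of the same odd-even swap schedule (no pipes or locks); the names here are my own, not from the snippet above.
def odd_even_transposition_serial(arr: list[int]) -> list[int]:
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases compare (1,2), (3,4), ...
        start = 0 if phase % 2 == 0 else 1
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition_serial(list(range(10, 0, -1))) == list(range(1, 11))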
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    SCREAMING_SNAKE_CASE_['modeling_focalnet'] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], SCREAMING_SNAKE_CASE_, module_spec=__spec__) | 34 |
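The snippet above follows the transformers lazy-import idiom: the module in sys.modules is swapped for a proxy that imports a submodule only when one of its names is first accessed. A stripped-down sketch of the idea (my own illustration, not the actual _LazyModule implementation):
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name back to the submodule that defines it
        self._name_to_module = {n: mod for mod, names in import_structure.items() for n in names}

    def __getattr__(self, name):
        if name not in self._name_to_module:
            raise AttributeError(name)
        # import the submodule only when one of its names is first requested
        module = importlib.import_module('.' + self._name_to_module[name], self.__name__)
        return getattr(module, name)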
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''big_bird'''
def __init__( self : str ,__A : Union[str, Any]=5_0358 ,__A : Any=768 ,__A : List[str]=12 ,__A : Union[str, Any]=12 ,__A : int=3072 ,__A : Tuple="gelu_new" ,__A : Any=0.1 ,__A : Optional[Any]=0.1 ,__A : Tuple=4096 ,__A : int=2 ,__A : Union[str, Any]=0.02 ,__A : Optional[int]=1e-12 ,__A : List[str]=True ,__A : List[Any]=0 ,__A : Optional[Any]=1 ,__A : Optional[int]=2 ,__A : Optional[int]=66 ,__A : Tuple="block_sparse" ,__A : Optional[int]=True ,__A : Optional[int]=False ,__A : Tuple=64 ,__A : str=3 ,__A : Optional[int]=None ,**__A : Dict ,) -> Union[str, Any]:
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,sep_token_id=__A ,**__A ,)
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = initializer_range
_lowercase = type_vocab_size
_lowercase = layer_norm_eps
_lowercase = use_cache
_lowercase = rescale_embeddings
_lowercase = attention_type
_lowercase = use_bias
_lowercase = block_size
_lowercase = num_random_blocks
_lowercase = classifier_dropout
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowercase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 67 | 0 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def a ( A__ ) -> List[Any]:
'''simple docstring'''
for param in module.parameters():
SCREAMING_SNAKE_CASE__ : Optional[int] = False
def a ( ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
SCREAMING_SNAKE_CASE__ : List[str] = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def a ( A__ ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = plt.imshow(A__ )
fig.axes.get_xaxis().set_visible(A__ )
fig.axes.get_yaxis().set_visible(A__ )
plt.show()
def a ( ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = datetime.now()
SCREAMING_SNAKE_CASE__ : Optional[int] = current_time.strftime('''%H:%M:%S''' )
return timestamp
| 35 |
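What the first two helpers above amount to, written out with explicit names (assumed names, since the snippet's are all obfuscated to a(...)):
import torch
from torch import nn

def freeze_module(module: nn.Module) -> None:
    # exclude the module's weights from gradient updates
    for param in module.parameters():
        param.requires_grad = False

device = 'cuda' if torch.cuda.is_available() else 'cpu'
layer = nn.Linear(4, 2).to(device)
freeze_module(layer)
assert all(not p.requires_grad for p in layer.parameters())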
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> list:
_lowercase = [0] * len(snake_case__ )
for i in range(1 , len(snake_case__ ) ):
# use last results for better performance - dynamic programming
_lowercase = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_lowercase = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_lowercase = j
return prefix_result
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> int:
return max(prefix_function(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 67 | 0 |
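A self-contained restatement of the prefix function above (the KMP failure table), with explicit names since the snippet's are obfuscated, plus a quick check:
def prefix_function(s: str) -> list[int]:
    pi = [0] * len(s)  # pi[i] = length of the longest proper prefix of s[:i+1] that is also its suffix
    for i in range(1, len(s)):
        j = pi[i - 1]
        while j > 0 and s[i] != s[j]:
            j = pi[j - 1]
        if s[i] == s[j]:
            j += 1
        pi[i] = j
    return pi

assert prefix_function('aabcdaabc') == [0, 1, 0, 0, 0, 1, 2, 3, 4]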
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : List[Any] = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Dict = '''encodec'''
def __init__( self ,SCREAMING_SNAKE_CASE_=[1.5, 3.0, 6.0, 12.0, 24.0] ,SCREAMING_SNAKE_CASE_=24000 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=128 ,SCREAMING_SNAKE_CASE_=32 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=[8, 5, 4, 2] ,SCREAMING_SNAKE_CASE_="weight_norm" ,SCREAMING_SNAKE_CASE_=7 ,SCREAMING_SNAKE_CASE_=7 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_="reflect" ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=1.0 ,SCREAMING_SNAKE_CASE_=1024 ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=True ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Optional[Any] = target_bandwidths
snake_case : Tuple = sampling_rate
snake_case : str = audio_channels
snake_case : int = normalize
snake_case : Tuple = chunk_length_s
snake_case : Tuple = overlap
snake_case : Tuple = hidden_size
snake_case : Dict = num_filters
snake_case : List[Any] = num_residual_layers
snake_case : Optional[Any] = upsampling_ratios
snake_case : Union[str, Any] = norm_type
snake_case : List[Any] = kernel_size
snake_case : List[str] = last_kernel_size
snake_case : List[Any] = residual_kernel_size
snake_case : int = dilation_growth_rate
snake_case : str = use_causal_conv
snake_case : Tuple = pad_mode
snake_case : int = compress
snake_case : str = num_lstm_layers
snake_case : List[Any] = trim_right_ratio
snake_case : Any = codebook_size
snake_case : List[Any] = codebook_dim if codebook_dim is not None else hidden_size
snake_case : Dict = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**SCREAMING_SNAKE_CASE_ )
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
@property
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[Any] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def snake_case_ ( self ):
'''simple docstring'''
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 36 |
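Worked example of the derived properties above, using the defaults from __init__ (24 kHz sampling rate, upsampling ratios [8, 5, 4, 2], top target bandwidth 24.0 kbps):
hop_length = 8 * 5 * 4 * 2                              # np.prod of the upsampling ratios = 320 samples per frame
frame_rate = -(-24000 // hop_length)                    # ceil(24000 / 320) = 75 frames per second
num_quantizers = int(1000 * 24.0 // (frame_rate * 10))  # 32 codebooks at the highest bandwidth
assert (hop_length, frame_rate, num_quantizers) == (320, 75, 32)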
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Union[str, Any]:
_lowercase = len(snake_case__ )
_lowercase = sum(snake_case__ )
_lowercase = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_lowercase = True
for i in range(1 , s + 1 ):
_lowercase = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
_lowercase = dp[i][j - 1]
if arr[i - 1] <= j:
_lowercase = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j]:
_lowercase = s - 2 * j
break
return diff | 67 | 0 |
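A compact set-based restatement of the minimum partition difference computed above, with explicit names since the snippet's are obfuscated; for [1, 6, 11, 5] the best split is {1, 5, 6} vs {11}, giving a difference of 1:
def min_partition_diff(arr: list[int]) -> int:
    total = sum(arr)
    reachable = {0}  # all subset sums seen so far
    for x in arr:
        reachable |= {r + x for r in reachable}
    # the best split keeps one side as close to total // 2 as possible
    return min(total - 2 * j for j in reachable if j <= total // 2)

assert min_partition_diff([1, 6, 11, 5]) == 1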
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class A__ :
"""simple docstring"""
_lowercase = 42
_lowercase = None
_lowercase = None
UpperCamelCase : Union[str, Any] = namedtuple("""CoinsDistribResult""", """moves excess""")
def UpperCamelCase_ ( __a ) -> int:
if root is None:
return 0
# Validation
def count_nodes(__a ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(__a ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(__a ) != count_coins(__a ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(__a ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
a__, a__ : Optional[Any] = get_distrib(node.left )
a__, a__ : List[Any] = get_distrib(node.right )
a__ : str = 1 - left_distrib_excess
a__ : Dict = 1 - right_distrib_excess
a__ : Any = (
left_distrib_moves
+ right_distrib_moves
+ abs(__a )
+ abs(__a )
)
a__ : str = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(__a , __a )
return get_distrib(__a )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37 |
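Worked example of the coin-distribution count above, restated with an explicit node type (the snippet's dataclass fields are obfuscated): a root holding 3 coins with two empty children needs 2 moves, one coin pushed down each edge:
from dataclasses import dataclass
from typing import Optional

@dataclass
class Node:
    data: int
    left: Optional['Node'] = None
    right: Optional['Node'] = None

def distribute_coins(root: Optional[Node]) -> int:
    def walk(node):  # returns (moves, excess coins flowing up to the parent)
        if node is None:
            return 0, 0
        left_moves, left_excess = walk(node.left)
        right_moves, right_excess = walk(node.right)
        excess = node.data - 1 + left_excess + right_excess
        return left_moves + right_moves + abs(left_excess) + abs(right_excess), excess
    return walk(root)[0]

assert distribute_coins(Node(3, Node(0), Node(0))) == 2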
from manim import *
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
_lowercase = Rectangle(height=0.5 ,width=0.5 )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
_lowercase = Rectangle(height=0.25 ,width=0.25 )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('CPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(4 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('GPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
gpu.move_to([-1, -1, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('Model' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
model.move_to([3, -1.0, 0] )
self.add(__A )
_lowercase = []
_lowercase = []
for i, rect in enumerate(__A ):
_lowercase = fill.copy().set_fill(__A ,opacity=0.8 )
target.move_to(__A )
model_arr.append(__A )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__A ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__A )
self.add(*__A ,*__A )
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('Disk' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
disk.move_to([-4, -1.25, 0] )
self.add(__A ,__A )
_lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowercase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__A ,__A )
_lowercase = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(__A ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__A )
_lowercase = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ) )
_lowercase = Square(0.3 )
input.set_fill(__A ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__A ,buff=0.5 )
self.play(Write(__A ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__A ,buff=0.02 )
self.play(MoveToTarget(__A ) )
self.play(FadeOut(__A ) )
_lowercase = Arrow(start=__A ,end=__A ,color=__A ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__A ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_lowercase = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) )
_lowercase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__A ) ,Circumscribe(model_arr[0] ,color=__A ,**__A ) ,Circumscribe(model_cpu_arr[0] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
_lowercase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__A ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_lowercase = AnimationGroup(
FadeOut(__A ,run_time=0.5 ) ,MoveToTarget(__A ,run_time=0.5 ) ,FadeIn(__A ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__A )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_lowercase = 0.7
self.play(
Circumscribe(model_arr[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,Circumscribe(model_arr[i + 1] ,color=__A ,**__A ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__A ,**__A ) ,Circumscribe(cpu_left_col_base[-1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
_lowercase = a_c
_lowercase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__A ) ,FadeOut(__A ,run_time=0.5 ) ,)
_lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) ,MoveToTarget(__A ) )
self.wait() | 67 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __snake_case :
'''simple docstring'''
@staticmethod
def __UpperCamelCase ( *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
pass
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
A_ : str = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = pipeline(
"""document-question-answering""" , model=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = INVOICE_URL
snake_case__ : Tuple = list(zip(*apply_tesseract(load_image(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """""" ) ) )
snake_case__ : Dict = """What is the placebo?"""
snake_case__ : Dict = [
{
"""image""": load_image(__SCREAMING_SNAKE_CASE ),
"""question""": question,
},
{
"""image""": image,
"""question""": question,
},
{
"""image""": image,
"""question""": question,
"""word_boxes""": word_boxes,
},
]
return dqa_pipeline, examples
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = dqa_pipeline(__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
[
{"""score""": ANY(__SCREAMING_SNAKE_CASE ), """answer""": ANY(__SCREAMING_SNAKE_CASE ), """start""": ANY(__SCREAMING_SNAKE_CASE ), """end""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": ANY(__SCREAMING_SNAKE_CASE ), """answer""": ANY(__SCREAMING_SNAKE_CASE ), """start""": ANY(__SCREAMING_SNAKE_CASE ), """end""": ANY(__SCREAMING_SNAKE_CASE )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""" )
snake_case__ : Any = INVOICE_URL
snake_case__ : int = """How many cats are there?"""
snake_case__ : str = [
{"""score""": 0.0001, """answer""": """oy 2312/2019""", """start""": 3_8, """end""": 3_9},
{"""score""": 0.0001, """answer""": """oy 2312/2019 DUE""", """start""": 3_8, """end""": 4_0},
]
snake_case__ : Tuple = dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , __SCREAMING_SNAKE_CASE )
        # No text is detected in this image, so layoutlmv2 should fail to find an answer.
        # Expect an empty result.
snake_case__ : Tuple = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
snake_case__ : Tuple = dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(__SCREAMING_SNAKE_CASE , [] )
        # We can optionally pass the words and bounding boxes directly
snake_case__ : Tuple = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
snake_case__ : Optional[int] = []
snake_case__ : Optional[int] = []
snake_case__ : Any = dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , words=__SCREAMING_SNAKE_CASE , boxes=__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(__SCREAMING_SNAKE_CASE , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __UpperCamelCase ( self ):
snake_case__ : Dict = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
snake_case__ : int = INVOICE_URL
snake_case__ : Tuple = """What is the invoice number?"""
snake_case__ : str = dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.9944, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0009, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
snake_case__ : int = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.9944, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0009, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
snake_case__ : List[Any] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{"""score""": 0.9944, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0009, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __UpperCamelCase ( self ):
snake_case__ : int = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=5_0 , )
snake_case__ : Any = INVOICE_URL
snake_case__ : Tuple = """What is the invoice number?"""
snake_case__ : Union[str, Any] = dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.9974, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
{"""score""": 0.9948, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
snake_case__ : Dict = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.9974, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
{"""score""": 0.9948, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
snake_case__ : List[str] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{"""score""": 0.9974, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
{"""score""": 0.9948, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=__SCREAMING_SNAKE_CASE , revision="""3dc6de3""" , )
snake_case__ : str = INVOICE_URL
snake_case__ : Optional[int] = """What is the invoice number?"""
snake_case__ : int = dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.4251, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
] , )
snake_case__ : str = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.4251, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
] , )
snake_case__ : List[str] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{"""score""": 0.4251, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
]
]
* 2 , )
snake_case__ : Any = list(zip(*apply_tesseract(load_image(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """""" ) ) )
# This model should also work if `image` is set to None
snake_case__ : Optional[int] = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.4251, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=__SCREAMING_SNAKE_CASE , revision="""3dc6de3""" , max_seq_len=5_0 , )
snake_case__ : str = INVOICE_URL
snake_case__ : Optional[Any] = """What is the invoice number?"""
snake_case__ : List[str] = dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.9999, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.9998, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
snake_case__ : Dict = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{"""score""": 0.9999, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.9998, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
]
]
* 2 , )
snake_case__ : Any = list(zip(*apply_tesseract(load_image(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """""" ) ) )
# This model should also work if `image` is set to None
snake_case__ : Optional[Any] = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.9999, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.9998, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
@slow
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = pipeline(
"""document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
snake_case__ : Dict = INVOICE_URL
snake_case__ : Dict = """What is the invoice number?"""
snake_case__ : Tuple = dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [{"""answer""": """us-001"""}] )
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
def __UpperCamelCase ( self ):
pass
| 38 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A_ :
"""simple docstring"""
def __init__( self : Dict ,__A : Any ,__A : Tuple=None ,__A : Optional[int]=None ,__A : Optional[int]=None ,__A : int="resnet50" ,__A : int=3 ,__A : List[Any]=32 ,__A : Tuple=3 ,__A : List[Any]=True ,__A : Tuple=True ,) -> Any:
_lowercase = parent
_lowercase = out_indices if out_indices is not None else [4]
_lowercase = stage_names
_lowercase = out_features
_lowercase = backbone
_lowercase = batch_size
_lowercase = image_size
_lowercase = num_channels
_lowercase = use_pretrained_backbone
_lowercase = is_training
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase = self.get_config()
return config, pixel_values
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)
def __UpperCAmelCase ( self : Any ,__A : Any ,__A : Dict ) -> Union[str, Any]:
_lowercase = TimmBackbone(config=__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowercase = model(__A )
self.parent.assertEqual(
result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,)
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
_lowercase = self.prepare_config_and_inputs()
_lowercase , _lowercase = config_and_inputs
_lowercase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Any = False
def __UpperCAmelCase ( self : str ) -> Optional[int]:
_lowercase = TimmBackboneModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A )
def __UpperCAmelCase ( self : int ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
_lowercase = 'resnet18'
_lowercase = 'microsoft/resnet-18'
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A )
_lowercase = AutoBackbone.from_pretrained(__A )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A ,out_indices=[1, 2, 3] )
_lowercase = AutoBackbone.from_pretrained(__A ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : int ) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __UpperCAmelCase ( self : Any ) -> Any:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
pass
def __UpperCAmelCase ( self : Dict ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
_lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase = [*signature.parameters.keys()]
_lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__A )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = True
_lowercase = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowercase = self.all_model_classes[0]
_lowercase = model_class(__A )
model.to(__A )
_lowercase = self._prepare_for_class(__A ,__A )
_lowercase = model(**__A )
_lowercase = outputs[0][-1]
# Encoder-/Decoder-only models
_lowercase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowercase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_lowercase = copy.deepcopy(__A )
_lowercase = None
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
_lowercase = copy.deepcopy(__A )
_lowercase = False
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A ) | 67 | 0 |
from __future__ import annotations
import math
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if len(SCREAMING_SNAKE_CASE__ ) != 2 or len(a[0] ) != 2 or len(SCREAMING_SNAKE_CASE__ ) != 2 or len(b[0] ) != 2:
raise Exception('''Matrices are not 2x2''' )
snake_case_ = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(SCREAMING_SNAKE_CASE__ ) )
]
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(SCREAMING_SNAKE_CASE__ ) )
]
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if len(SCREAMING_SNAKE_CASE__ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('''Odd matrices are not supported!''' )
snake_case_ = len(SCREAMING_SNAKE_CASE__ )
snake_case_ = matrix_length // 2
snake_case_ = [[a[i][j] for j in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] for i in range(SCREAMING_SNAKE_CASE__ )]
snake_case_ = [
[a[i][j] for j in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] for i in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
]
snake_case_ = [[a[i][j] for j in range(SCREAMING_SNAKE_CASE__ )] for i in range(SCREAMING_SNAKE_CASE__ )]
snake_case_ = [[a[i][j] for j in range(SCREAMING_SNAKE_CASE__ )] for i in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )]
return top_left, top_right, bot_left, bot_right
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
return len(SCREAMING_SNAKE_CASE__ ), len(matrix[0] )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
print('''\n'''.join(str(SCREAMING_SNAKE_CASE__ ) for line in matrix ) )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if matrix_dimensions(SCREAMING_SNAKE_CASE__ ) == (2, 2):
return default_matrix_multiplication(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_, snake_case_, snake_case_, snake_case_ = split_matrix(SCREAMING_SNAKE_CASE__ )
snake_case_, snake_case_, snake_case_, snake_case_ = split_matrix(SCREAMING_SNAKE_CASE__ )
snake_case_ = actual_strassen(SCREAMING_SNAKE_CASE__ , matrix_subtraction(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
snake_case_ = actual_strassen(matrix_addition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
snake_case_ = actual_strassen(matrix_addition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
snake_case_ = actual_strassen(SCREAMING_SNAKE_CASE__ , matrix_subtraction(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
snake_case_ = actual_strassen(matrix_addition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , matrix_addition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
snake_case_ = actual_strassen(matrix_subtraction(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , matrix_addition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
snake_case_ = actual_strassen(matrix_subtraction(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , matrix_addition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
snake_case_ = matrix_addition(matrix_subtraction(matrix_addition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
snake_case_ = matrix_addition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ = matrix_addition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ = matrix_subtraction(matrix_subtraction(matrix_addition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
# construct the new matrix from our 4 quadrants
snake_case_ = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if matrix_dimensions(SCREAMING_SNAKE_CASE__ )[1] != matrix_dimensions(SCREAMING_SNAKE_CASE__ )[0]:
snake_case_ = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
F'''Matrix A: {matrixa}\n'''
F'''Matrix B: {matrixa}'''
)
raise Exception(SCREAMING_SNAKE_CASE__ )
snake_case_ = matrix_dimensions(SCREAMING_SNAKE_CASE__ )
snake_case_ = matrix_dimensions(SCREAMING_SNAKE_CASE__ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
snake_case_ = max(*SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )
snake_case_ = int(math.pow(2 , math.ceil(math.loga(SCREAMING_SNAKE_CASE__ ) ) ) )
snake_case_ = matrixa
snake_case_ = matrixa
    # Pad the matrices with zeros so that both have the same dimensions and
    # those dimensions are a power of 2
for i in range(0 , SCREAMING_SNAKE_CASE__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , SCREAMING_SNAKE_CASE__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , SCREAMING_SNAKE_CASE__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
snake_case_ = actual_strassen(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Removing the additional zeros
for i in range(0 , SCREAMING_SNAKE_CASE__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , SCREAMING_SNAKE_CASE__ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
lowerCAmelCase_ = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
lowerCAmelCase_ = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa)) | 39 |
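A quick cross-check target for strassen above: a plain O(n^3) multiply (a hypothetical helper, not part of the snippet) gives the product the recursive version should reproduce:
def naive_matmul(a: list[list[int]], b: list[list[int]]) -> list[list[int]]:
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]

assert naive_matmul([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [[19, 22], [43, 50]]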
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    snake_case["""modeling_nllb_moe"""] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], snake_case, module_spec=__spec__) | 67 | 0 |
import math
import os
import sys
def UpperCamelCase ( snake_case__ : str ) -> str:
UpperCamelCase : Tuple = ''
try:
with open(snake_case__ , 'rb' ) as binary_file:
UpperCamelCase : Dict = binary_file.read()
for dat in data:
UpperCamelCase : str = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def UpperCamelCase ( snake_case__ : dict[str, str] , snake_case__ : str , snake_case__ : int , snake_case__ : str ) -> None:
lexicon.pop(snake_case__ )
UpperCamelCase : Tuple = last_match_id
if math.loga(snake_case__ ).is_integer():
for curr_key in lexicon:
UpperCamelCase : List[str] = '0' + lexicon[curr_key]
UpperCamelCase : List[str] = bin(snake_case__ )[2:]
def UpperCamelCase ( snake_case__ : str ) -> str:
UpperCamelCase : int = {'0': '0', '1': '1'}
UpperCamelCase , UpperCamelCase : Union[str, Any] = '', ''
UpperCamelCase : str = len(snake_case__ )
for i in range(len(snake_case__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCamelCase : List[Any] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
index += 1
UpperCamelCase : Optional[int] = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
UpperCamelCase : Tuple = lexicon[curr_string]
result += last_match_id
return result
def UpperCamelCase ( snake_case__ : str , snake_case__ : str ) -> str:
UpperCamelCase : Optional[Any] = os.path.getsize(snake_case__ )
UpperCamelCase : str = bin(snake_case__ )[2:]
UpperCamelCase : int = len(snake_case__ )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCamelCase ( snake_case__ : str , snake_case__ : str ) -> None:
UpperCamelCase : List[Any] = 8
try:
with open(snake_case__ , 'wb' ) as opened_file:
UpperCamelCase : Union[str, Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(snake_case__ ) , snake_case__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(snake_case__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCamelCase ( snake_case__ : str , snake_case__ : str ) -> None:
UpperCamelCase : List[str] = read_file_binary(snake_case__ )
UpperCamelCase : Optional[int] = compress_data(snake_case__ )
UpperCamelCase : str = add_file_length(snake_case__ , snake_case__ )
write_file_binary(snake_case__ , snake_case__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 40 |
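A hedged sketch of how a decoder could peel off the length header written by add_file_length above — (k - 1) zeros, then a k-bit binary length, then the payload; the helper name is my own:
def split_length_header(stream: str) -> tuple[int, str]:
    # the first '1' ends the run of (k - 1) padding zeros, so the k-bit
    # length field occupies stream[k - 1 : 2 * k - 1]
    k = stream.index('1') + 1
    file_length = int(stream[k - 1 : 2 * k - 1], 2)
    return file_length, stream[2 * k - 1 :]

assert split_length_header('00101' + '1010') == (5, '1010')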
snake_case = {str(digit): digit**5 for digit in range(1_0)}
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> int:
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(snake_case__ ) )
def SCREAMING_SNAKE_CASE__ ( ) -> int:
return sum(
number
for number in range(1000 , 100_0000 )
if number == digits_fifth_powers_sum(snake_case__ ) )
if __name__ == "__main__":
print(solution()) | 67 | 0 |
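Why the 100_0000 search bound above is safe: even a 7-digit number's digit-fifth-power sum is at most 7 * 9**5 = 413343, which has only six digits, so no number with seven or more digits can equal its own sum:
assert 7 * 9**5 == 413343 and 7 * 9**5 < 10**6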
'''simple docstring'''
from __future__ import annotations
lowerCAmelCase__ = 8.988e9 # units = N * m^2 * C^-2
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if distance < 0:
raise ValueError('''Distance cannot be negative''' )
if force == 0:
__lowercase = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
__lowercase = abs(A__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
__lowercase = abs(A__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
__lowercase = (COULOMBS_CONSTANT * charge_product / abs(A__ )) ** 0.5
return {"distance": distance}
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41 |
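Worked numeric instance of the force branch above, restated with explicit names since the snippet's signature is obfuscated: q1 = 3 C and q2 = 5 C at 2000 m give F = 8.988e9 * 15 / 2000**2 = 33705.0 N:
def coulombs_law_force(q1: float, q2: float, distance: float) -> float:
    COULOMBS_CONSTANT = 8.988e9  # N * m^2 * C^-2
    return COULOMBS_CONSTANT * abs(q1 * q2) / distance**2

assert abs(coulombs_law_force(3, 5, 2000) - 33705.0) < 1e-9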
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> list[int]:
_lowercase = str(snake_case__ )
_lowercase = [n]
for i in range(1 , len(snake_case__ ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> bool:
if len(str(snake_case__ ) ) > 3:
if not is_prime(int(str(snake_case__ )[-3:] ) ) or not is_prime(int(str(snake_case__ )[:3] ) ):
return False
return True
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int = 11 ) -> list[int]:
_lowercase = []
_lowercase = 13
while len(snake_case__ ) != count:
if validate(snake_case__ ):
_lowercase = list_truncated_nums(snake_case__ )
if all(is_prime(snake_case__ ) for i in list_nums ):
list_truncated_primes.append(snake_case__ )
num += 2
return list_truncated_primes
def SCREAMING_SNAKE_CASE__ ( ) -> int:
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(1_1)) = }""") | 67 | 0 |
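Sanity check for the truncation helper above, restated with explicit names: 3797 stays prime under truncation from both ends (3, 37, 379 and 797, 97, 7), one of the eleven truncatable primes whose known total is 748317 (Project Euler 37):
def truncations(n: int) -> list[int]:
    s = str(n)
    return [int(s[i:]) for i in range(len(s))] + [int(s[:i]) for i in range(1, len(s))]

assert sorted(truncations(3797)) == [3, 7, 37, 97, 379, 797, 3797]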
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(config_path):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(config_path, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()

    is_segmentation_model = False
    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """remove unused keys (e.g.: seg_head.aux_head)"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
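# Example invocation of the conversion script above; the script filename and
# all paths are hypothetical placeholders, only the flags come from the parser:
#   python convert_mlcvnets_to_pytorch.py --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt \
#       --orig_config_path mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256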
| 42 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return TaConfig.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return TaConfig(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return TaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMTaModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTests(unittest.TestCase):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING) | 67 | 0 |
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue

        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
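    # A small usage check of the search above; the grid and coordinates are
    # made-up illustration values (cells equal to 1 are passable):
    demo_grid = np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]])
    demo_dist, demo_path = dijkstra(demo_grid, (0, 0), (2, 2), allow_diagonal=False)
    assert demo_dist == 4.0
    assert demo_path == [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]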
| 43 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile) | 67 | 0 |
'''simple docstring'''


def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f'''{solution() = }''') | 44 |
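# Known result for reference: solution(20) == 232792560, the smallest positive
# number evenly divisible by every integer from 1 to 20 (Project Euler 5).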
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE_ : int = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] ,__A : List[Any]=5_0265 ,__A : str=512 ,__A : Optional[int]=8 ,__A : Any=2048 ,__A : Tuple=16 ,__A : str=8 ,__A : int=2048 ,__A : List[str]=16 ,__A : Optional[int]=0.0 ,__A : Any=0.0 ,__A : int=True ,__A : List[Any]=True ,__A : Tuple="gelu" ,__A : Any=512 ,__A : Dict=0.1 ,__A : Tuple=0.0 ,__A : int=0.0 ,__A : int=0.02 ,__A : Dict=1 ,__A : str=False ,__A : Dict=0 ,__A : Union[str, Any]=1 ,__A : Optional[int]=2 ,__A : List[str]=2 ,**__A : Tuple ,) -> Tuple:
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = d_model
_lowercase = encoder_ffn_dim
_lowercase = encoder_layers
_lowercase = encoder_attention_heads
_lowercase = decoder_ffn_dim
_lowercase = decoder_layers
_lowercase = decoder_attention_heads
_lowercase = dropout
_lowercase = attention_dropout
_lowercase = activation_dropout
_lowercase = activation_function
_lowercase = init_std
_lowercase = encoder_layerdrop
_lowercase = decoder_layerdrop
_lowercase = use_cache
_lowercase = encoder_layers
_lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,is_encoder_decoder=__A ,decoder_start_token_id=__A ,forced_eos_token_id=__A ,**__A ,)
class BlenderbotSmallOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            ) | 67 | 0 |
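# A hypothetical sketch of exercising the ONNX config above; the tokenizer
# checkpoint and the AutoTokenizer/config class names are assumptions, not
# taken from this file:
#   config = BlenderbotSmallConfig()
#   onnx_config = BlenderbotSmallOnnxConfig(config, task="default")
#   tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#   dummy = onnx_config.generate_dummy_inputs(
#       tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#   )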
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png") | 45 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_model(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 | 67 | 0 |
"""simple docstring"""
def is_palindrome(head) -> bool:
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head) -> bool:
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True | 46 |
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC")) | 67 | 0 |
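# For reference, the call above prints [4, 10, 18]: the start indices of every
# occurrence of "ABC" in the text.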
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
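    # For reference: the two sets share {"c", "d", "e"} out of 8 distinct
    # elements in their union, so this prints 3 / 8 = 0.375.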
| 47 |
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests() | 67 | 0 |
'''simple docstring'''
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
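    # A small usage check of the tree above (the example values are made up):
    f = FenwickTree([1, 2, 3, 4, 5])
    assert f.prefix(3) == 6  # 1 + 2 + 3; prefix(right) is exclusive of `right`
    assert f.query(1, 4) == 9  # 2 + 3 + 4
    assert f.rank_query(5) == 1  # prefix sums 1 and 3 fit within 5; index 2 would exceed it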
| 48 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
def test_call_coco_detection_annotations( self ) -> None:
    # prepare image and target
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
        target = json.loads(f.read() )
    target = {'image_id': 3_9769, 'annotations': target}
    # encode them
    image_processing = DetaImageProcessor()
    encoding = image_processing(images=image ,annotations=target ,return_tensors='pt' )
    # verify pixel values
    expected_shape = torch.Size([1, 3, 800, 1066] )
    self.assertEqual(encoding['pixel_values'].shape ,expected_shape )
    expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
    self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,expected_slice ,atol=1e-4 ) )
    # verify area
    expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,expected_area ) )
    # verify boxes
    expected_boxes_shape = torch.Size([6, 4] )
    self.assertEqual(encoding['labels'][0]['boxes'].shape ,expected_boxes_shape )
    expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,expected_boxes_slice ,atol=1e-3 ) )
    # verify image_id
    expected_image_id = torch.tensor([3_9769] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,expected_image_id ) )
    # verify is_crowd
    expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,expected_is_crowd ) )
    # verify class_labels
    expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,expected_class_labels ) )
    # verify orig_size
    expected_orig_size = torch.tensor([480, 640] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,expected_orig_size ) )
    # verify size
    expected_size = torch.tensor([800, 1066] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,expected_size ) )
@slow
def test_call_coco_panoptic_annotations( self ) -> None:
    # prepare image, target and masks_path
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
        target = json.loads(f.read() )
    target = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
    masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
    # encode them
    image_processing = DetaImageProcessor(format='coco_panoptic' )
    encoding = image_processing(images=image ,annotations=target ,masks_path=masks_path ,return_tensors='pt' )
    # verify pixel values
    expected_shape = torch.Size([1, 3, 800, 1066] )
    self.assertEqual(encoding['pixel_values'].shape ,expected_shape )
    expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
    self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,expected_slice ,atol=1e-4 ) )
    # verify area
    expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,expected_area ) )
    # verify boxes
    expected_boxes_shape = torch.Size([6, 4] )
    self.assertEqual(encoding['labels'][0]['boxes'].shape ,expected_boxes_shape )
    expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,expected_boxes_slice ,atol=1e-3 ) )
    # verify image_id
    expected_image_id = torch.tensor([3_9769] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,expected_image_id ) )
    # verify is_crowd
    expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,expected_is_crowd ) )
    # verify class_labels
    expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,expected_class_labels ) )
    # verify masks
    expected_masks_sum = 82_2873
    self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,expected_masks_sum )
    # verify orig_size
    expected_orig_size = torch.tensor([480, 640] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,expected_orig_size ) )
    # verify size
    expected_size = torch.tensor([800, 1066] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,expected_size ) ) | 67 | 0 |
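The two integration tests above feed absolute COCO annotations in and assert that the returned `boxes` come back as normalized center-format tensors. A minimal sketch of that corner-to-center conversion, assuming only plain PyTorch (the function name is illustrative, not the processor's internal API):

import torch

def coco_to_center_format(boxes: torch.Tensor, width: int, height: int) -> torch.Tensor:
    # boxes: (N, 4) in absolute COCO (x_min, y_min, w, h) pixels
    x, y, w, h = boxes.unbind(-1)
    # shift the top-left corner to the box center
    cx = x + w / 2
    cy = y + h / 2
    centered = torch.stack([cx, cy, w, h], dim=-1)
    # normalize by the image size so values land in [0, 1]
    scale = torch.tensor([width, height, width, height], dtype=torch.float32)
    return centered / scale

boxes = torch.tensor([[13.0, 22.7, 53.3, 84.5]])
print(coco_to_center_format(boxes, width=640, height=480))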
"""simple docstring"""
def equation( x : float ) -> float:
    return 10 - x * x

def bisection( a : float , b : float ) -> float:
    # Bolzano theorem: a root lies between a and b only if f(a) and f(b) differ in sign
    if equation(a ) * equation(b ) >= 0:
        raise ValueError('''Wrong space!''' )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 49 |
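The bisection above halves the bracket until it is narrower than 0.01, so it needs roughly log2((b - a) / 0.01) iterations. A self-contained sketch of the same loop that checks its answer against the known root sqrt(10) of 10 - x**2 (the name `bisect` and the `tol` parameter are illustrative):

import math

def bisect(f, a: float, b: float, tol: float = 0.01) -> float:
    if f(a) * f(b) >= 0:
        raise ValueError("f(a) and f(b) must have opposite signs")
    while (b - a) >= tol:
        c = (a + b) / 2          # midpoint of the current bracket
        if f(c) == 0.0:
            return c
        if f(c) * f(a) < 0:      # root is in the left half
            b = c
        else:                    # root is in the right half
            a = c
    return (a + b) / 2

root = bisect(lambda x: 10 - x * x, 0, 6)
print(root, math.isclose(root, math.sqrt(10), abs_tol=0.01))  # True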
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory( args: Namespace ) -> BaseTransformersCLICommand:
    return TrainCommand(args )

class TrainCommand(BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
def register_subcommand( parser: ArgumentParser ) -> None:
    train_parser = parser.add_parser('train' ,help='CLI tool to train a model on a task.' )
    train_parser.add_argument(
        '--train_data' ,type=str ,required=True ,help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' ,)
    train_parser.add_argument(
        '--column_label' ,type=int ,default=0 ,help='Column of the dataset csv file with example labels.' )
    train_parser.add_argument(
        '--column_text' ,type=int ,default=1 ,help='Column of the dataset csv file with example texts.' )
    train_parser.add_argument(
        '--column_id' ,type=int ,default=2 ,help='Column of the dataset csv file with example ids.' )
    train_parser.add_argument(
        '--skip_first_row' ,action='store_true' ,help='Skip the first row of the csv file (headers).' )
    train_parser.add_argument('--validation_data' ,type=str ,default='' ,help='path to validation dataset.' )
    train_parser.add_argument(
        '--validation_split' ,type=float ,default=0.1 ,help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' ,)
    train_parser.add_argument('--output' ,type=str ,default='./' ,help='path to saved the trained model.' )
    train_parser.add_argument(
        '--task' ,type=str ,default='text_classification' ,help='Task to train the model on.' )
    train_parser.add_argument(
        '--model' ,type=str ,default='bert-base-uncased' ,help='Model\'s name or path to stored model.' )
    train_parser.add_argument('--train_batch_size' ,type=int ,default=32 ,help='Batch size for training.' )
    train_parser.add_argument('--valid_batch_size' ,type=int ,default=64 ,help='Batch size for validation.' )
    train_parser.add_argument('--learning_rate' ,type=float ,default=3e-5 ,help='Learning rate.' )
    train_parser.add_argument('--adam_epsilon' ,type=float ,default=1e-08 ,help='Epsilon for Adam optimizer.' )
    train_parser.set_defaults(func=train_command_factory )
def __init__( self ,args: Namespace ) -> None:
    self.logger = logging.get_logger('transformers-cli/training' )
    self.framework = 'tf' if is_tf_available() else 'torch'
    os.makedirs(args.output ,exist_ok=True )
    self.output = args.output
    self.column_label = args.column_label
    self.column_text = args.column_text
    self.column_id = args.column_id
    self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
    if args.task == "text_classification":
        self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
    elif args.task == "token_classification":
        raise NotImplementedError
    elif args.task == "question_answering":
        raise NotImplementedError
    self.logger.info(F"""Loading dataset from {args.train_data}""" )
    self.train_dataset = Processor.create_from_csv(
        args.train_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
    self.valid_dataset = None
    if args.validation_data:
        self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
        self.valid_dataset = Processor.create_from_csv(
            args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
    self.validation_split = args.validation_split
    self.train_batch_size = args.train_batch_size
    self.valid_batch_size = args.valid_batch_size
    self.learning_rate = args.learning_rate
    self.adam_epsilon = args.adam_epsilon
def run( self ) -> None:
    if self.framework == "tf":
        return self.run_tf()
    return self.run_torch()

def run_torch( self ) -> None:
    raise NotImplementedError

def run_tf( self ) -> None:
    self.pipeline.fit(
        self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,)
    # Save trained pipeline
    self.pipeline.save_pretrained(self.output ) | 67 | 0 |
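The CLI pattern above hangs a factory on the subparser via `set_defaults(func=...)`, so dispatch reduces to `args.func(args)`. A minimal, self-contained sketch of that wiring with a hypothetical `EchoCommand`:

from argparse import ArgumentParser, Namespace

class EchoCommand:
    def __init__(self, args: Namespace):
        self.text = args.text

    def run(self):
        print(self.text)

def echo_factory(args: Namespace) -> "EchoCommand":
    return EchoCommand(args)

parser = ArgumentParser("cli")
subparsers = parser.add_subparsers()
echo = subparsers.add_parser("echo")
echo.add_argument("--text", type=str, default="hello")
echo.set_defaults(func=echo_factory)  # the factory rides along on the parsed args

args = parser.parse_args(["echo", "--text", "hi"])
args.func(args).run()  # prints "hi"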
'''simple docstring'''
def binary_recursive( decimal : int ) -> str:
    decimal = int(decimal )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div , mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )

def main( number : str ) -> str:
    number = str(number ).strip()
    if not number:
        raise ValueError("""No input value was provided""" )
    negative = """-""" if number.startswith("""-""" ) else """"""
    number = number.lstrip("""-""" )
    if not number.isnumeric():
        raise ValueError("""Input value is not an integer""" )
    return F'''{negative}0b{binary_recursive(int(number ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 50 |
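Each recursive call peels off the least-significant bit with divmod(n, 2) and prepends the bits of the quotient, so the string is built most-significant bit first. A quick self-check against Python's built-in binary formatting (names are illustrative):

def to_binary(n: int) -> str:
    if n in (0, 1):  # base case: a single bit
        return str(n)
    div, mod = divmod(n, 2)
    return to_binary(div) + str(mod)  # quotient's bits first, then this bit

for n in (0, 1, 5, 200, 2**10):
    assert to_binary(n) == format(n, "b")
print("all matches")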
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url: str ) -> tuple:
    config = DPTConfig(embedding_type='hybrid' )
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = 'project'
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_( state_dict ) -> None:
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( name: str ) -> str:
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model' , 'dpt.encoder' )
    if "pretrained.model" in name:
        name = name.replace('pretrained.model' , 'dpt.embeddings' )
    if "patch_embed" in name:
        name = name.replace('patch_embed' , '' )
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'position_embeddings' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "proj" in name and "project" not in name:
        name = name.replace('proj' , 'projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layer' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "norm1" in name and "backbone" not in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name and "backbone" not in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv' , 'head' )
    if "scratch" in name:
        name = name.replace('scratch' , 'neck' )
    if "layer1_rn" in name:
        name = name.replace('layer1_rn' , 'convs.0' )
    if "layer2_rn" in name:
        name = name.replace('layer2_rn' , 'convs.1' )
    if "layer3_rn" in name:
        name = name.replace('layer3_rn' , 'convs.2' )
    if "layer4_rn" in name:
        name = name.replace('layer4_rn' , 'convs.3' )
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace('out_conv' , 'projection' )
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1' , 'residual_layer1' )
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2' , 'residual_layer2' )
    if "conv1" in name:
        name = name.replace('conv1' , 'convolution1' )
    if "conv2" in name:
        name = name.replace('conv2' , 'convolution2' )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
    if "pretrained" in name:
        name = name.replace('pretrained' , 'dpt' )
    if "bn" in name:
        name = name.replace('bn' , 'batch_norm' )
    if "head" in name:
        name = name.replace('head' , 'head.head' )
    if "encoder.norm" in name:
        name = name.replace('encoder.norm' , 'layernorm' )
    if "auxlayer" in name:
        name = name.replace('auxlayer' , 'auxiliary_head.head' )
    if "backbone" in name:
        name = name.replace('backbone' , 'backbone.bit.encoder' )
    if ".." in name:
        name = name.replace('..' , '.' )
    if "stem.conv" in name:
        name = name.replace('stem.conv' , 'bit.embedder.convolution' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    if "convolution" in name and "backbone" in name:
        name = name.replace('convolution' , 'conv' )
    if "layer" in name and "backbone" in name:
        name = name.replace('layer' , 'layers' )
    if "backbone.bit.encoder.bit" in name:
        name = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
    if "embedder.conv" in name:
        name = name.replace('embedder.conv' , 'embedder.convolution' )
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
    return name
def read_in_q_k_v( state_dict , config ) -> None:
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img() -> Image.Image:
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ) -> None:
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location='cpu' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if 'ade' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors='pt' )
    # forward pass
    outputs = model(**encoding ).logits if 'ade' in checkpoint_url else model(**encoding ).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"""Saving model to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub('ybelkada/dpt-hybrid-midas' )
        image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
snake_case = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
) | 67 | 0 |
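The heart of the conversion is the pop-and-reinsert loop: every checkpoint key is pushed through a chain of substring rewrites, then its tensor is re-registered under the new name. A toy version of that loop on a two-entry state dict (the rewrite rules here are illustrative, not the full DPT mapping):

import torch

def rename(name: str) -> str:
    # illustrative rules only; the real script chains many such rewrites
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    return name

state_dict = {
    "blocks.0.mlp.fc1.weight": torch.zeros(2, 2),
    "blocks.0.mlp.fc1.bias": torch.zeros(2),
}
for key in list(state_dict.keys()):  # snapshot keys before mutating the dict
    state_dict[rename(key)] = state_dict.pop(key)
print(sorted(state_dict))  # ['layer.0.intermediate.dense.bias', 'layer.0.intermediate.dense.weight']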
'''simple docstring'''
def generate_large_matrix() -> list[list[int]]:
    """Build a 1000x1000 grid whose rows and columns are sorted in decreasing order."""
    return [list(range(1_000 - i , -1_000 - i , -1 ) ) for i in range(1_000 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid( grid: list[list[int]] ) -> None:
    """Check that rows and columns are sorted in decreasing order."""
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index( array: list[int] ) -> int:
    """Binary-search the index of the first negative number in a decreasing row."""
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
def count_negatives_binary_search( grid: list[list[int]] ) -> int:
    """Count negatives, shrinking the per-row search bound as rows descend."""
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force( grid: list[list[int]] ) -> int:
    """Count negatives by scanning every value."""
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break( grid: list[list[int]] ) -> int:
    """Scan each row but stop at the first negative value."""
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark() -> None:
    """Time the three counting strategies on the large grid."""
    from timeit import timeit

    print('''Running benchmarks''' )
    setup = (
        '''from __main__ import count_negatives_binary_search, '''
        '''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)" , setup=setup , number=500 )
        print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 51 |
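Because rows and columns are both sorted in decreasing order, the first-negative boundary can only move left (or stay) as you descend the rows; the binary-search version above exploits this by shrinking `bound` per row. The same monotonicity also admits a single O(rows + cols) staircase walk, sketched below (the function name is illustrative):

def count_negatives_staircase(grid: list[list[int]]) -> int:
    # start at the top-right corner and walk the sign boundary left/down
    rows, cols = len(grid), len(grid[0])
    col = cols - 1
    total = 0
    for row in range(rows):
        while col >= 0 and grid[row][col] < 0:
            col -= 1  # the boundary moves left and never moves back right
        total += cols - 1 - col  # negatives sit to the right of the boundary
    return total

print(count_negatives_staircase([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]))  # 8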
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit_mae"""] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_vit_mae"""] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 67 | 0 |
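The lazy-module pattern above keeps torch and TensorFlow imports off the package's import path until a symbol is actually requested. A minimal sketch of the same idea using module-level `__getattr__` (PEP 562), independent of the transformers helper:

import importlib

_import_structure = {"json": ["dumps"], "math": ["sqrt"]}

def __getattr__(name):
    # module-level __getattr__ runs only for names not already defined here
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

Dropped into a package's __init__.py, this makes `from pkg import sqrt` trigger the underlying `math` import only at that moment.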
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker( seq: Iterable[str] , size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it , size))
        if not chunk:
            return
        yield chunk
def prepare_input( dirty: str) -> str:
    dirty = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ''''''
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table( key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode( plaintext: str , key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ''''''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(plaintext , 2):
        rowa , cola = divmod(table.index(chara) , 5)
        rowb , colb = divmod(table.index(charb) , 5)
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext
def decode( ciphertext: str , key: str) -> str:
    table = generate_table(key)
    plaintext = ''''''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(ciphertext , 2):
        rowa , cola = divmod(table.index(chara) , 5)
        rowb , colb = divmod(table.index(charb) , 5)
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext | 52 |
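A quick round trip through the two functions above (assuming the repaired names `encode`/`decode`). Note that `prepare_input` inserts pad characters, so decoding returns the padded form rather than the original message:

key = "monarchy"
cipher = encode("ballon", key)   # the double L forces an X pad, and odd length adds a trailing X
print(cipher)
print(decode(cipher, key))       # 'BALXLONX', not 'BALLON'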
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline | 67 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_snake_case : Optional[Any] = logging.get_logger(__name__)
class SequenceFeatureExtractor( FeatureExtractionMixin ):
"""simple docstring"""
def __init__( self , feature_size: int , sampling_rate: int , padding_value: float , **kwargs ) -> None:
    self.feature_size = feature_size
    self.sampling_rate = sampling_rate
    self.padding_value = padding_value
    self.padding_side = kwargs.pop('padding_side' , 'right' )
    self.return_attention_mask = kwargs.pop('return_attention_mask' , True )
    super().__init__(**kwargs )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , lowerCAmelCase_ : Union[bool, str, PaddingStrategy] = True , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowerCAmelCase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__lowerCAmelCase = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
f""" to this method that includes {self.model_input_names[0]}, but you provided"""
f""" {list(processed_features.keys() )}""" )
__lowerCAmelCase = processed_features[self.model_input_names[0]]
__lowerCAmelCase = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCAmelCase_ ) == 0:
if return_attention_mask:
__lowerCAmelCase = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__lowerCAmelCase = required_input[0]
if isinstance(lowerCAmelCase_ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__lowerCAmelCase = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCAmelCase_ ):
__lowerCAmelCase = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCAmelCase_ ):
__lowerCAmelCase = 'tf'
elif is_torch_tensor(lowerCAmelCase_ ):
__lowerCAmelCase = 'pt'
elif isinstance(lowerCAmelCase_ , (int, float, list, tuple, np.ndarray) ):
__lowerCAmelCase = 'np'
else:
raise ValueError(
f"""type of {first_element} unknown: {type(lowerCAmelCase_ )}. """
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__lowerCAmelCase = to_numpy(lowerCAmelCase_ )
else:
__lowerCAmelCase = [to_numpy(lowerCAmelCase_ ) for v in value]
# Convert padding_strategy in PaddingStrategy
__lowerCAmelCase = self._get_padding_strategies(padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
__lowerCAmelCase = processed_features[self.model_input_names[0]]
__lowerCAmelCase = len(lowerCAmelCase_ )
if not all(len(lowerCAmelCase_ ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
__lowerCAmelCase = []
for i in range(lowerCAmelCase_ ):
__lowerCAmelCase = {k: v[i] for k, v in processed_features.items()}
# truncation
__lowerCAmelCase = self._truncate(
lowerCAmelCase_ , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , truncation=lowerCAmelCase_ , )
truncated_inputs.append(lowerCAmelCase_ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__lowerCAmelCase = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__lowerCAmelCase = PaddingStrategy.MAX_LENGTH
__lowerCAmelCase = {}
for i in range(lowerCAmelCase_ ):
# padding
__lowerCAmelCase = self._pad(
truncated_inputs[i] , max_length=lowerCAmelCase_ , padding_strategy=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
for key, value in outputs.items():
if key not in batch_outputs:
__lowerCAmelCase = []
if value.dtype is np.dtype(np.floataa ):
__lowerCAmelCase = value.astype(np.floataa )
batch_outputs[key].append(lowerCAmelCase_ )
return BatchFeature(lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
def lowercase ( self : Tuple , lowerCAmelCase_ : Union[Dict[str, np.ndarray], BatchFeature] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , ) -> dict:
__lowerCAmelCase = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__lowerCAmelCase = len(lowerCAmelCase_ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowerCAmelCase = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowerCAmelCase = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCAmelCase_ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__lowerCAmelCase = np.ones(len(lowerCAmelCase_ ) , dtype=np.intaa )
if needs_to_be_padded:
__lowerCAmelCase = max_length - len(lowerCAmelCase_ )
if self.padding_side == "right":
if return_attention_mask:
__lowerCAmelCase = np.pad(
processed_features['attention_mask'] , (0, difference) )
__lowerCAmelCase = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__lowerCAmelCase = np.pad(
lowerCAmelCase_ , lowerCAmelCase_ , 'constant' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__lowerCAmelCase = np.pad(
processed_features['attention_mask'] , (difference, 0) )
__lowerCAmelCase = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__lowerCAmelCase = np.pad(
lowerCAmelCase_ , lowerCAmelCase_ , 'constant' , constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Union[Dict[str, np.ndarray], BatchFeature] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , ) -> Any:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
__lowerCAmelCase = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowerCAmelCase = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowerCAmelCase = len(lowerCAmelCase_ ) > max_length
if needs_to_be_truncated:
__lowerCAmelCase = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__lowerCAmelCase = processed_features['attention_mask'][:max_length]
return processed_features
def lowercase ( self : Tuple , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Optional[Any]=None ) -> List[str]:
# Get padding strategy
if padding is not False:
if padding is True:
__lowerCAmelCase = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowerCAmelCase = PaddingStrategy(lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowerCAmelCase = padding
else:
__lowerCAmelCase = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
| 53 |
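The padder's core move is `difference = max_length - len(seq)` followed by `np.pad` on the configured side for both the values and the attention mask. A stripped-down numpy sketch of right-side padding for 1-D features (names are illustrative, not the transformers API):

import numpy as np

def pad_batch(batch: list[np.ndarray], padding_value: float = 0.0):
    max_length = max(len(seq) for seq in batch)
    values, masks = [], []
    for seq in batch:
        difference = max_length - len(seq)
        # pad the values on the right with the padding value
        values.append(np.pad(seq, (0, difference), constant_values=padding_value))
        # attention mask: 1 for real frames, 0 for padding
        masks.append(np.pad(np.ones(len(seq), dtype=np.int32), (0, difference)))
    return np.stack(values), np.stack(masks)

values, mask = pad_batch([np.array([0.1, 0.2, 0.3]), np.array([0.5])])
print(values.shape, mask.tolist())  # (2, 3) [[1, 1, 1], [1, 0, 0]]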
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
snake_case = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class TransfoXLConfig( PretrainedConfig ):
"""simple docstring"""
    model_type = '''transfo-xl'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''',
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
def __init__( self ,vocab_size=26_7735 ,cutoffs=[2_0000, 4_0000, 20_0000] ,d_model=1024 ,d_embed=1024 ,n_head=16 ,d_head=64 ,d_inner=4096 ,div_val=4 ,pre_lnorm=False ,n_layer=18 ,mem_len=1600 ,clamp_len=1000 ,same_length=True ,proj_share_all_but_first=True ,attn_type=0 ,sample_softmax=-1 ,adaptive=True ,dropout=0.1 ,dropatt=0.0 ,untie_r=True ,init="normal" ,init_range=0.01 ,proj_init_std=0.01 ,init_std=0.02 ,layer_norm_epsilon=1e-5 ,eos_token_id=0 ,**kwargs ,) -> None:
    self.vocab_size = vocab_size
    self.cutoffs = []
    self.cutoffs.extend(cutoffs )
    if proj_share_all_but_first:
        self.tie_projs = [False] + [True] * len(self.cutoffs )
    else:
        self.tie_projs = [False] + [False] * len(self.cutoffs )
    self.d_model = d_model
    self.d_embed = d_embed
    self.d_head = d_head
    self.d_inner = d_inner
    self.div_val = div_val
    self.pre_lnorm = pre_lnorm
    self.n_layer = n_layer
    self.n_head = n_head
    self.mem_len = mem_len
    self.same_length = same_length
    self.attn_type = attn_type
    self.clamp_len = clamp_len
    self.sample_softmax = sample_softmax
    self.adaptive = adaptive
    self.dropout = dropout
    self.dropatt = dropatt
    self.untie_r = untie_r
    self.init = init
    self.init_range = init_range
    self.proj_init_std = proj_init_std
    self.init_std = init_std
    self.layer_norm_epsilon = layer_norm_epsilon
    super().__init__(eos_token_id=eos_token_id ,**kwargs )
@property
def max_position_embeddings( self ) -> int:
    # Message copied from Transformer-XL documentation
    logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
    return -1

@max_position_embeddings.setter
def max_position_embeddings( self ,value ) -> None:
    # Message copied from Transformer-XL documentation
    raise NotImplementedError(
        F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) | 67 | 0 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=False ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
UpperCAmelCase_ =os.path.abspath(lowercase__ )
logger.info(F'Loading PyTorch weights from {pt_path}' )
UpperCAmelCase_ =torch.load(lowercase__ , map_location="cpu" )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
UpperCAmelCase_ =convert_pytorch_state_dict_to_flax(lowercase__ , lowercase__ )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCAmelCase_ =convert_pytorch_sharded_state_dict_to_flax(lowercase__ , lowercase__ )
return flax_state_dict
def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(lowercase__ ) -> bool:
return len(set(lowercase__ ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCAmelCase_ =pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(lowercase__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCAmelCase_ =pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(lowercase__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCAmelCase_ =pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(lowercase__ ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCAmelCase_ =pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(lowercase__ ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCAmelCase_ =pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(lowercase__ ):
UpperCAmelCase_ =pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCAmelCase_ =pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(lowercase__ ):
UpperCAmelCase_ =pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCAmelCase_ =pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCAmelCase_ =pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCAmelCase_ =None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCAmelCase_ =pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCAmelCase_ =pt_tuple_key[-2] + "_v"
if name is not None:
UpperCAmelCase_ =pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase_ =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCAmelCase_ =flax_model.params["params"]
else:
UpperCAmelCase_ =flax_model.params
UpperCAmelCase_ =flatten_dict(lowercase__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase_ =flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(lowercase__ )
UpperCAmelCase_ ={}
UpperCAmelCase_ =(model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase_ =(model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase_ =tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCAmelCase_ =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase_ =pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase_ , UpperCAmelCase_ =rename_key_and_reshape_tensor(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# add model prefix if necessary
UpperCAmelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase_ =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCAmelCase_ =jnp.asarray(lowercase__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(lowercase__ , lowercase__ )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase_ =jnp.asarray(lowercase__ )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase_ =jnp.asarray(lowercase__ )
return unflatten_dict(lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
import torch
# Load the index
UpperCAmelCase_ ={}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCAmelCase_ =torch.load(lowercase__ )
UpperCAmelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase_ =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase_ =flax_model.params["params"]
UpperCAmelCase_ =flatten_dict(lowercase__ )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCAmelCase_ =flax_model.params
UpperCAmelCase_ =flatten_dict(lowercase__ )
UpperCAmelCase_ =(model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase_ =(model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase_ =tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCAmelCase_ =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase_ =pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase_ , UpperCAmelCase_ =rename_key_and_reshape_tensor(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# add model prefix if necessary
UpperCAmelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase_ =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCAmelCase_ =jnp.asarray(lowercase__ )
continue
if "var" in flax_key[-1]:
UpperCAmelCase_ =jnp.asarray(lowercase__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(lowercase__ , lowercase__ )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase_ =jnp.asarray(lowercase__ )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase_ =jnp.asarray(lowercase__ )
return unflatten_dict(lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =os.path.abspath(lowercase__ )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
UpperCAmelCase_ =getattr(lowercase__ , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(lowercase__ , "rb" ) as state_f:
try:
UpperCAmelCase_ =from_bytes(lowercase__ , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(lowercase__ , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
    UpperCAmelCase_ =flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , lowercase__ ) ).values()
if any(lowercase__ ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
UpperCAmelCase_ =jax.tree_util.tree_map(
lambda lowercase__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , lowercase__ )
UpperCAmelCase_ =flatten_dict(lowercase__ )
UpperCAmelCase_ =pt_model.state_dict()
UpperCAmelCase_ =(pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCAmelCase_ =(pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCAmelCase_ =[]
UpperCAmelCase_ =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCAmelCase_ =flax_key_tuple[0] == pt_model.base_model_prefix
UpperCAmelCase_ =".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase_ =flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase_ =(pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(lowercase__ ) not in pt_model_dict:
# conv layer
UpperCAmelCase_ =flax_key_tuple[:-1] + ("weight",)
UpperCAmelCase_ =jnp.transpose(lowercase__ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowercase__ ) not in pt_model_dict:
# linear layer
UpperCAmelCase_ =flax_key_tuple[:-1] + ("weight",)
UpperCAmelCase_ =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCAmelCase_ =flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCAmelCase_ =flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCAmelCase_ =flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCAmelCase_ =".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCAmelCase_ =".".join(lowercase__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCAmelCase_ ={}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCAmelCase_ =key.split("." )
UpperCAmelCase_ =None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCAmelCase_ =key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCAmelCase_ =key_components[-2] + "_v"
if name is not None:
UpperCAmelCase_ =key_components[:-3] + [name]
UpperCAmelCase_ =".".join(lowercase__ )
UpperCAmelCase_ =key
if flax_key in special_pt_names:
UpperCAmelCase_ =special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCAmelCase_ =np.asarray(lowercase__ ) if not isinstance(lowercase__ , np.ndarray ) else flax_tensor
UpperCAmelCase_ =torch.from_numpy(lowercase__ )
# remove from missing keys
missing_keys.remove(lowercase__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(lowercase__ )
pt_model.load_state_dict(lowercase__ )
# re-transform missing_keys to list
UpperCAmelCase_ =list(lowercase__ )
if len(lowercase__ ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(lowercase__ ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 54 |
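The two reshapes that matter most in these PyTorch/Flax conversions are the ones handled above: a Linear weight becomes a transposed Flax `kernel`, and a Conv2d weight goes from (out, in, kh, kw) to (kh, kw, in, out). A standalone numpy sketch of just those two rules:

import numpy as np

pt_linear = np.zeros((8, 4))               # PyTorch Linear: (out_features, in_features)
flax_kernel = pt_linear.T                  # Flax Dense kernel: (in_features, out_features)

pt_conv = np.zeros((16, 3, 5, 5))          # PyTorch Conv2d: (out, in, kh, kw)
flax_conv = pt_conv.transpose(2, 3, 1, 0)  # Flax Conv kernel: (kh, kw, in, out)

print(flax_kernel.shape, flax_conv.shape)  # (4, 8) (5, 5, 3, 16)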
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = '''dpr'''

    def __init__( self ,vocab_size=3_0522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,pad_token_id=0 ,position_embedding_type="absolute" ,projection_dim: int = 0 ,**kwargs ,) -> None:
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type | 67 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation( graph: dict , v: str , visited_forward: set , visited_backward: set , cst_fwd: dict , cst_bwd: dict , queue: PriorityQueue , parent: dict , shortest_distance: float | int , ) -> float | int:
    """Relax the edges of v and update the best crossing distance seen so far."""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt , np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij( source: str , destination: str , graph_forward: dict , graph_backward: dict ) -> int:
    """Bi-directional Dijkstra: search from both ends and meet in the middle."""
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _ , v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _ , v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward , v_fwd , visited_forward , visited_backward , cst_fwd , cst_bwd , queue_forward , parent_forward , shortest_distance , )
        shortest_distance = pass_and_relaxation(
            graph_backward , v_bwd , visited_backward , visited_forward , cst_bwd , cst_fwd , queue_backward , parent_backward , shortest_distance , )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55 |
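A quick call on the sample graphs above (assuming the repaired name `bidirectional_dij`); the search expands from both ends and stops once the combined frontier cost can no longer beat the best crossing distance found:

# E -> G (cost 2) -> F (cost 1) beats E -> B -> C -> D -> F (total cost 4)
print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3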
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker run by each process: repeatedly compare-exchange with neighbors."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort arr by spawning one process per element, linked by pipes."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
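# For reference, a single-process sketch of the same odd-even transposition
# idea (not part of the parallel implementation above): alternate "even" and
# "odd" compare-exchange phases; n phases suffice for a list of length n.
def odd_even_transposition_sequential(data: list) -> list:
    for phase in range(len(data)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, len(data) - 1, 2):
            if data[i] > data[i + 1]:
                data[i], data[i + 1] = data[i + 1], data[i]
    return data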
if __name__ == "__main__":
main() | 67 | 0 |
"""Get the site emails from a URL."""
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    """Collect every anchor URL found in a page, joined against a base URL."""

    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Return the registrable domain, e.g. 'https://sub.example.com/x' -> 'example.com'."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the network location, e.g. 'https://sub.example.com/x' -> 'sub.example.com'."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Crawl the page at `url` and return all on-domain email addresses found."""
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(url)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open each discovered URL
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f'''{len(emails)} emails found:''')
print("\n".join(sorted(emails)))
| 56 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''big_bird'''
def __init__( self : str ,__A : Union[str, Any]=5_0358 ,__A : Any=768 ,__A : List[str]=12 ,__A : Union[str, Any]=12 ,__A : int=3072 ,__A : Tuple="gelu_new" ,__A : Any=0.1 ,__A : Optional[Any]=0.1 ,__A : Tuple=4096 ,__A : int=2 ,__A : Union[str, Any]=0.02 ,__A : Optional[int]=1e-12 ,__A : List[str]=True ,__A : List[Any]=0 ,__A : Optional[Any]=1 ,__A : Optional[int]=2 ,__A : Optional[int]=66 ,__A : Tuple="block_sparse" ,__A : Optional[int]=True ,__A : Optional[int]=False ,__A : Tuple=64 ,__A : str=3 ,__A : Optional[int]=None ,**__A : Dict ,) -> Union[str, Any]:
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,sep_token_id=__A ,**__A ,)
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = initializer_range
_lowercase = type_vocab_size
_lowercase = layer_norm_eps
_lowercase = use_cache
_lowercase = rescale_embeddings
_lowercase = attention_type
_lowercase = use_bias
_lowercase = block_size
_lowercase = num_random_blocks
_lowercase = classifier_dropout
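# A minimal instantiation sketch (BigBirdConfig is the conventional name for
# this placeholder-renamed class): block-sparse attention restricts each token
# to its block plus a few random and global blocks, so it only pays off when
# the sequence length is large relative to block_size.
# config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)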
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowercase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 67 | 0 |
A_ : Any = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor | 57 |
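# The guard pattern used throughout the file above generalizes to any optional
# dependency: probe for the package and fall back to dummy placeholders when
# it is missing. A minimal standalone sketch of the same idea ("scipy" is just
# an example module name):
import importlib.util

def _is_available(module_name: str) -> bool:
    # True when module_name can be imported in this environment.
    return importlib.util.find_spec(module_name) is not None

if _is_available("scipy"):
    pass  # the real objects would be imported here
else:
    pass  # the dummy stand-ins would be imported here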
def prefix_function(input_string: str) -> list:
    """
    Knuth-Morris-Pratt prefix function: result[i] is the length of the longest
    proper prefix of input_string that is also a suffix of input_string[: i + 1].
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Length of the longest proper prefix that also ends some prefix of the string."""
    return max(prefix_function(input_str))
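# Worked example for prefix_function above, with "aabcdaabc":
#   prefix_function("aabcdaabc") -> [0, 1, 0, 0, 0, 1, 2, 3, 4]
#   longest_prefix("aabcdaabc")  -> 4   (the prefix "aabc" also ends the string)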
if __name__ == "__main__":
import doctest
doctest.testmod() | 67 | 0 |
"""simple docstring"""
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Difference between hypothesis value and actual output for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    """Compute h(x) = theta_0 + sum_i theta_{i+1} * x_i for one input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output value for the requested example."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for the requested example."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum of error terms (weighted by feature `index`; -1 selects the bias)."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Averaged gradient of the cost with respect to parameter `index + 1`."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
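# The hypothesis fitted above is linear:
#     h(x) = theta_0 + theta_1 * x_1 + theta_2 * x_2 + theta_3 * x_3
# and each descent step updates every parameter with the averaged gradient
#     theta_i <- theta_i - LEARNING_RATE * (1/m) * sum_j (h(x_j) - y_j) * x_{j,i}
# (the bias theta_0 uses x_{j,i} = 1), which is exactly what
# summation_of_cost_derivative and get_cost_derivative compute.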
| 58 |
def find_min(arr: list) -> int:
    """Minimum possible difference between the sums of a two-way partition of arr."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to exactly j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # carry over "achievable without element i" ...
            dp[i][j] = dp[i - 1][j]
            # ... or use element i when it fits
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # the best partition puts a sum as close to s/2 as possible on one side
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
return diff | 67 | 0 |
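# A quick usage check for find_min above: for [1, 6, 11, 5] the best split is
# {1, 5, 6} vs {11}, so the minimum subset-sum difference is |12 - 11| = 1.
# find_min([1, 6, 11, 5]) -> 1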
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : List[Any] , UpperCAmelCase_ : str = "cpu" , UpperCAmelCase_ : str = "openai/clip-vit-large-patch14") ->None:
'''simple docstring'''
lowerCamelCase__: Tuple =device
lowerCamelCase__: Optional[int] =CLIPTokenizerFast.from_pretrained(UpperCAmelCase_)
lowerCamelCase__: Any =[0.4814_5466, 0.457_8275, 0.4082_1073]
lowerCamelCase__: Union[str, Any] =[0.2686_2954, 0.2613_0258, 0.2757_7711]
lowerCamelCase__: Union[str, Any] =torchvision.transforms.Normalize(self.image_mean , self.image_std)
lowerCamelCase__: List[Any] =torchvision.transforms.Resize(224)
lowerCamelCase__: Dict =torchvision.transforms.CenterCrop(224)
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : str) ->Any:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.resize(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =self.center_crop(UpperCAmelCase_)
lowerCamelCase__: Dict =self.normalize(UpperCAmelCase_)
return images
def __call__(self : Any , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : Tuple) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =self.tokenizer(text=UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: str =self.preprocess_img(UpperCAmelCase_)
lowerCamelCase__: str ={key: value.to(self.device) for (key, value) in encoding.items()}
return encoding
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__(self : Any , UpperCAmelCase_ : List[Any]=10 , UpperCAmelCase_ : int=0.01 , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str="image" , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[Any]=False , ) ->None:
'''simple docstring'''
super().__init__()
lowerCamelCase__: Optional[int] =None
lowerCamelCase__: str =device if device else get_device()
if vqgan:
lowerCamelCase__: Dict =vqgan
else:
lowerCamelCase__: Optional[Any] =load_vqgan(self.device , conf_path=UpperCAmelCase_ , ckpt_path=UpperCAmelCase_)
self.vqgan.eval()
if clip:
lowerCamelCase__: Union[str, Any] =clip
else:
lowerCamelCase__: str =CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
self.clip.to(self.device)
lowerCamelCase__: str =ProcessorGradientFlow(device=self.device)
lowerCamelCase__: Union[str, Any] =iterations
lowerCamelCase__: str =lr
lowerCamelCase__: str =log
lowerCamelCase__: str =make_grid
lowerCamelCase__: int =return_val
lowerCamelCase__: int =quantize
lowerCamelCase__: Dict =self.vqgan.decoder.z_shape
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[Any]=5 , UpperCAmelCase_ : Union[str, Any]=True) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Dict =[]
if output_path is None:
lowerCamelCase__: Union[str, Any] ="./animation.gif"
if input_path is None:
lowerCamelCase__: List[Any] =self.save_path
lowerCamelCase__: Any =sorted(glob(input_path + "/*"))
if not len(UpperCAmelCase_):
raise ValueError(
"No images found in save path, aborting (did you pass save_intermediate=True to the generate"
" function?)")
if len(UpperCAmelCase_) == 1:
print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
lowerCamelCase__: Tuple =total_duration / len(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =[frame_duration] * len(UpperCAmelCase_)
if extend_frames:
lowerCamelCase__: int =1.5
lowerCamelCase__: Optional[int] =3
for file_name in paths:
if file_name.endswith(".png"):
images.append(imageio.imread(UpperCAmelCase_))
imageio.mimsave(UpperCAmelCase_ , UpperCAmelCase_ , duration=UpperCAmelCase_)
print(F"""gif saved to {output_path}""")
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None) ->Tuple:
'''simple docstring'''
if not (path or img):
raise ValueError("Input either path or tensor")
if img is not None:
raise NotImplementedError
lowerCamelCase__: str =preprocess(Image.open(UpperCAmelCase_) , target_image_size=256).to(self.device)
lowerCamelCase__: Any =preprocess_vqgan(UpperCAmelCase_)
lowerCamelCase__ , *lowerCamelCase__: str =self.vqgan.encode(UpperCAmelCase_)
return z
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : List[Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.latent.detach().requires_grad_()
lowerCamelCase__: List[Any] =base_latent + transform_vector
if self.quantize:
lowerCamelCase__ , *lowerCamelCase__: int =self.vqgan.quantize(UpperCAmelCase_)
else:
lowerCamelCase__: str =trans_latent
return self.vqgan.decode(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any]=None) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.clip_preprocessor(text=UpperCAmelCase_ , images=UpperCAmelCase_ , return_tensors="pt" , padding=UpperCAmelCase_)
lowerCamelCase__: List[str] =self.clip(**UpperCAmelCase_)
lowerCamelCase__: int =clip_outputs.logits_per_image
if weights is not None:
lowerCamelCase__: Optional[int] =similarity_logits * weights
return similarity_logits.sum()
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : int) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =self._get_clip_similarity(pos_prompts["prompts"] , UpperCAmelCase_ , weights=(1 / pos_prompts["weights"]))
if neg_prompts:
lowerCamelCase__: Tuple =self._get_clip_similarity(neg_prompts["prompts"] , UpperCAmelCase_ , weights=neg_prompts["weights"])
else:
lowerCamelCase__: List[Any] =torch.tensor([1] , device=self.device)
lowerCamelCase__: Union[str, Any] =-torch.log(UpperCAmelCase_) + torch.log(UpperCAmelCase_)
return loss
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]) ->int:
'''simple docstring'''
lowerCamelCase__: Dict =torch.randn_like(self.latent , requires_grad=UpperCAmelCase_ , device=self.device)
lowerCamelCase__: Any =torch.optim.Adam([vector] , lr=self.lr)
for i in range(self.iterations):
optim.zero_grad()
lowerCamelCase__: List[Any] =self._add_vector(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =loop_post_process(UpperCAmelCase_)
lowerCamelCase__: str =self._get_CLIP_loss(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
print("CLIP loss" , UpperCAmelCase_)
if self.log:
wandb.log({"CLIP Loss": clip_loss})
clip_loss.backward(retain_graph=UpperCAmelCase_)
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0])
else:
yield vector
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any]) ->str:
'''simple docstring'''
wandb.init(reinit=UpperCAmelCase_ , project="face-editor")
wandb.config.update({"Positive Prompts": positive_prompts})
wandb.config.update({"Negative Prompts": negative_prompts})
wandb.config.update({"lr": self.lr, "iterations": self.iterations})
if image_path:
lowerCamelCase__: Dict =Image.open(UpperCAmelCase_)
lowerCamelCase__: str =image.resize((256, 256))
wandb.log("Original Image" , wandb.Image(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Optional[int]) ->int:
'''simple docstring'''
if not prompts:
return []
lowerCamelCase__: Optional[Any] =[]
lowerCamelCase__: Union[str, Any] =[]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: Tuple =[prompt.strip() for prompt in prompts.split("|")]
for prompt in prompts:
if isinstance(UpperCAmelCase_ , (tuple, list)):
lowerCamelCase__: Optional[Any] =prompt[0]
lowerCamelCase__: Dict =float(prompt[1])
elif ":" in prompt:
lowerCamelCase__ , lowerCamelCase__: Optional[Any] =prompt.split(":")
lowerCamelCase__: str =float(UpperCAmelCase_)
else:
lowerCamelCase__: List[str] =prompt
lowerCamelCase__: Any =1.0
processed_prompts.append(UpperCAmelCase_)
weights.append(UpperCAmelCase_)
return {
"prompts": processed_prompts,
"weights": torch.tensor(UpperCAmelCase_ , device=self.device),
}
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=None , ) ->List[str]:
'''simple docstring'''
if image_path:
lowerCamelCase__: Any =self._get_latent(UpperCAmelCase_)
else:
lowerCamelCase__: int =torch.randn(self.latent_dim , device=self.device)
if self.log:
self._init_logging(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
assert pos_prompts, "You must provide at least one positive prompt."
lowerCamelCase__: Optional[int] =self.process_prompts(UpperCAmelCase_)
lowerCamelCase__: Dict =self.process_prompts(UpperCAmelCase_)
if save_final and save_path is None:
lowerCamelCase__: Optional[Any] =os.path.join("./outputs/" , "_".join(pos_prompts["prompts"]))
if not os.path.exists(UpperCAmelCase_):
os.makedirs(UpperCAmelCase_)
else:
lowerCamelCase__: Dict =save_path + "_" + get_timestamp()
os.makedirs(UpperCAmelCase_)
lowerCamelCase__: int =save_path
lowerCamelCase__: Dict =self.vqgan.decode(self.latent)[0]
if show_intermediate:
print("Original Image")
show_pil(custom_to_pil(UpperCAmelCase_))
lowerCamelCase__: List[Any] =loop_post_process(UpperCAmelCase_)
for iter, transformed_img in enumerate(self._optimize_CLIP(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)):
if show_intermediate:
show_pil(UpperCAmelCase_)
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}.png"""))
if self.log:
wandb.log({"Image": wandb.Image(UpperCAmelCase_)})
if show_final:
show_pil(UpperCAmelCase_)
if save_final:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}_final.png"""))
| 59 |
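# The latent optimization above is driven by a contrastive CLIP objective:
#     loss = -log(sim(image, positive prompts)) + log(sim(image, negative prompts))
# so each Adam step on the VQGAN latent pushes the decoded image toward the
# positive prompts and away from the negative ones.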
from manim import *
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
_lowercase = Rectangle(height=0.5 ,width=0.5 )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
_lowercase = Rectangle(height=0.25 ,width=0.25 )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('CPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(4 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('GPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
gpu.move_to([-1, -1, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('Model' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
model.move_to([3, -1.0, 0] )
self.add(__A )
_lowercase = []
_lowercase = []
for i, rect in enumerate(__A ):
_lowercase = fill.copy().set_fill(__A ,opacity=0.8 )
target.move_to(__A )
model_arr.append(__A )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__A ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__A )
self.add(*__A ,*__A )
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('Disk' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
disk.move_to([-4, -1.25, 0] )
self.add(__A ,__A )
_lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowercase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__A ,__A )
_lowercase = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(__A ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__A )
_lowercase = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ) )
_lowercase = Square(0.3 )
input.set_fill(__A ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__A ,buff=0.5 )
self.play(Write(__A ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__A ,buff=0.02 )
self.play(MoveToTarget(__A ) )
self.play(FadeOut(__A ) )
_lowercase = Arrow(start=__A ,end=__A ,color=__A ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__A ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_lowercase = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) )
_lowercase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__A ) ,Circumscribe(model_arr[0] ,color=__A ,**__A ) ,Circumscribe(model_cpu_arr[0] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
_lowercase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__A ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_lowercase = AnimationGroup(
FadeOut(__A ,run_time=0.5 ) ,MoveToTarget(__A ,run_time=0.5 ) ,FadeIn(__A ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__A )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_lowercase = 0.7
self.play(
Circumscribe(model_arr[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,Circumscribe(model_arr[i + 1] ,color=__A ,**__A ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__A ,**__A ) ,Circumscribe(cpu_left_col_base[-1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
_lowercase = a_c
_lowercase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__A ) ,FadeOut(__A ,run_time=0.5 ) ,)
_lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) ,MoveToTarget(__A ) )
self.wait() | 67 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = 0
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Dict = Path(__magic_name__ ) / '''preprocessor_config.json'''
snake_case_ : Dict = Path(__magic_name__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__magic_name__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__magic_name__ , '''w''' ) )
snake_case_ : Optional[int] = AutoImageProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Dict = Path(__magic_name__ ) / '''preprocessor_config.json'''
snake_case_ : Optional[int] = Path(__magic_name__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__magic_name__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__magic_name__ , '''w''' ) )
snake_case_ : List[Any] = AutoImageProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Dict = CLIPConfig()
# Create a dummy config file with image_proceesor_type
snake_case_ : List[str] = Path(__magic_name__ ) / '''preprocessor_config.json'''
snake_case_ : List[str] = Path(__magic_name__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__magic_name__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__magic_name__ , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case_ : List[str] = AutoImageProcessor.from_pretrained(__magic_name__ ).to_dict()
config_dict.pop('''image_processor_type''' )
snake_case_ : List[Any] = CLIPImageProcessor(**__magic_name__ )
# save in new folder
model_config.save_pretrained(__magic_name__ )
config.save_pretrained(__magic_name__ )
snake_case_ : str = AutoImageProcessor.from_pretrained(__magic_name__ )
# make sure private variable is not incorrectly saved
snake_case_ : Union[str, Any] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : str = Path(__magic_name__ ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__magic_name__ , '''w''' ) , )
snake_case_ : str = AutoImageProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(
__magic_name__ , '''clip-base is not a local folder and is not a valid model identifier''' ):
snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained('''clip-base''' )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
__magic_name__ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
snake_case_ : Union[str, Any] = AutoImageProcessor.from_pretrained(__magic_name__ , revision='''aaaaaa''' )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(
__magic_name__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
snake_case_ : int = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaises(__magic_name__ ):
snake_case_ : Tuple = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__magic_name__ ):
snake_case_ : List[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__magic_name__ )
snake_case_ : List[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__magic_name__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__magic_name__ )
snake_case_ : Tuple = AutoImageProcessor.from_pretrained(__magic_name__ , trust_remote_code=__magic_name__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , __magic_name__ )
AutoImageProcessor.register(__magic_name__ , __magic_name__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__magic_name__ ):
AutoImageProcessor.register(__magic_name__ , __magic_name__ )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Tuple = Path(__magic_name__ ) / '''preprocessor_config.json'''
snake_case_ : List[str] = Path(__magic_name__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__magic_name__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__magic_name__ , '''w''' ) )
snake_case_ : Tuple = CustomImageProcessor.from_pretrained(__magic_name__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__magic_name__ )
snake_case_ : Dict = AutoImageProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Union[str, Any] = True
try:
AutoConfig.register('''custom''' , __magic_name__ )
AutoImageProcessor.register(__magic_name__ , __magic_name__ )
# If remote code is not set, the default is to use local
snake_case_ : Dict = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__magic_name__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case_ : int = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__magic_name__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(__magic_name__ , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 60 |
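# The registration pattern exercised by the tests above, condensed (the
# try/finally cleanup keeps the global auto-mappings from leaking state
# between tests):
# try:
#     AutoConfig.register("custom", CustomConfig)
#     AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#     ...  # use AutoImageProcessor.from_pretrained(...) as usual
# finally:
#     if "custom" in CONFIG_MAPPING._extra_content:
#         del CONFIG_MAPPING._extra_content["custom"]
#     if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
#         del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]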
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A_ :
"""simple docstring"""
def __init__( self : Dict ,__A : Any ,__A : Tuple=None ,__A : Optional[int]=None ,__A : Optional[int]=None ,__A : int="resnet50" ,__A : int=3 ,__A : List[Any]=32 ,__A : Tuple=3 ,__A : List[Any]=True ,__A : Tuple=True ,) -> Any:
_lowercase = parent
_lowercase = out_indices if out_indices is not None else [4]
_lowercase = stage_names
_lowercase = out_features
_lowercase = backbone
_lowercase = batch_size
_lowercase = image_size
_lowercase = num_channels
_lowercase = use_pretrained_backbone
_lowercase = is_training
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase = self.get_config()
return config, pixel_values
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)
def __UpperCAmelCase ( self : Any ,__A : Any ,__A : Dict ) -> Union[str, Any]:
_lowercase = TimmBackbone(config=__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowercase = model(__A )
self.parent.assertEqual(
result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,)
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
_lowercase = self.prepare_config_and_inputs()
_lowercase , _lowercase = config_and_inputs
_lowercase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Any = False
def __UpperCAmelCase ( self : str ) -> Optional[int]:
_lowercase = TimmBackboneModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A )
def __UpperCAmelCase ( self : int ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
_lowercase = 'resnet18'
_lowercase = 'microsoft/resnet-18'
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A )
_lowercase = AutoBackbone.from_pretrained(__A )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A ,out_indices=[1, 2, 3] )
_lowercase = AutoBackbone.from_pretrained(__A ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : int ) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __UpperCAmelCase ( self : Any ) -> Any:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
pass
def __UpperCAmelCase ( self : Dict ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
_lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase = [*signature.parameters.keys()]
_lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__A )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = True
_lowercase = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowercase = self.all_model_classes[0]
_lowercase = model_class(__A )
model.to(__A )
_lowercase = self._prepare_for_class(__A ,__A )
_lowercase = model(**__A )
_lowercase = outputs[0][-1]
# Encoder-/Decoder-only models
_lowercase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowercase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_lowercase = copy.deepcopy(__A )
_lowercase = None
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
_lowercase = copy.deepcopy(__A )
_lowercase = False
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A ) | 67 | 0 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
UpperCamelCase = None
UpperCamelCase = {
'7B': 1_1008,
'13B': 1_3824,
'30B': 1_7920,
'65B': 2_2016,
'70B': 2_8672,
}
UpperCamelCase = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    """Round the ~8n/3 feed-forward width up to the next multiple of `multiple_of`."""
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
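# Worked example for compute_intermediate_size: for the 7B model, n = 4096 and
# int(8 * 4096 / 3) = 10922, which rounds up to the next multiple of 256,
# giving 11008 -- matching the "7B" entry in the table above.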
def read_json(path):
    """Load a JSON file into a Python object."""
    with open(path, "r") as f:
        return json.load(f)
def write_json(text, path):
    """Serialize `text` as JSON to `path`."""
    with open(path, "w") as f:
        json.dump(text, f)
def _A ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str]=True ):
"""simple docstring"""
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
lowerCAmelCase__ = os.path.join(lowerCAmelCase_ , "tmp" )
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
lowerCAmelCase__ = read_json(os.path.join(lowerCAmelCase_ , "params.json" ) )
lowerCAmelCase__ = NUM_SHARDS[model_size]
lowerCAmelCase__ = params["n_layers"]
lowerCAmelCase__ = params["n_heads"]
lowerCAmelCase__ = n_heads // num_shards
lowerCAmelCase__ = params["dim"]
lowerCAmelCase__ = dim // n_heads
lowerCAmelCase__ = 10000.0
lowerCAmelCase__ = 1.0 / (base ** (torch.arange(0 , lowerCAmelCase_ , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
lowerCAmelCase__ = params["n_kv_heads"] # for GQA / MQA
lowerCAmelCase__ = n_heads_per_shard // num_key_value_heads
lowerCAmelCase__ = dim // num_key_value_heads
else: # compatibility with other checkpoints
lowerCAmelCase__ = n_heads
lowerCAmelCase__ = n_heads_per_shard
lowerCAmelCase__ = dim
# permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(F'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
lowerCAmelCase__ = torch.load(os.path.join(lowerCAmelCase_ , "consolidated.00.pth" ) , map_location="cpu" )
else:
# Sharded
lowerCAmelCase__ = [
torch.load(os.path.join(lowerCAmelCase_ , F'consolidated.{i:02d}.pth' ) , map_location="cpu" )
for i in range(lowerCAmelCase_ )
]
lowerCAmelCase__ = 0
lowerCAmelCase__ = {"weight_map": {}}
for layer_i in range(lowerCAmelCase_ ):
lowerCAmelCase__ = F'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
lowerCAmelCase__ = {
F'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[F'layers.{layer_i}.attention.wq.weight'] ),
F'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[F'layers.{layer_i}.attention.wk.weight'] ),
F'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[F'layers.{layer_i}.attention.wv.weight'],
F'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[F'layers.{layer_i}.attention.wo.weight'],
F'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w1.weight'],
F'model.layers.{layer_i}.mlp.down_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w2.weight'],
F'model.layers.{layer_i}.mlp.up_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w3.weight'],
F'model.layers.{layer_i}.input_layernorm.weight': loaded[F'layers.{layer_i}.attention_norm.weight'],
F'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[F'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
lowerCAmelCase__ = {
F'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
F'layers.{layer_i}.attention_norm.weight'
].clone(),
F'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
F'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
lowerCAmelCase__ = permute(
torch.cat(
[
loaded[i][F'layers.{layer_i}.attention.wq.weight'].view(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
for i in range(lowerCAmelCase_ )
] , dim=0 , ).reshape(lowerCAmelCase_ , lowerCAmelCase_ ) )
lowerCAmelCase__ = permute(
torch.cat(
[
loaded[i][F'layers.{layer_i}.attention.wk.weight'].view(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
for i in range(lowerCAmelCase_ )
] , dim=0 , ).reshape(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
lowerCAmelCase__ = torch.cat(
[
loaded[i][F'layers.{layer_i}.attention.wv.weight'].view(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
for i in range(lowerCAmelCase_ )
] , dim=0 , ).reshape(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase__ = torch.cat(
[loaded[i][F'layers.{layer_i}.attention.wo.weight'] for i in range(lowerCAmelCase_ )] , dim=1 )
lowerCAmelCase__ = torch.cat(
[loaded[i][F'layers.{layer_i}.feed_forward.w1.weight'] for i in range(lowerCAmelCase_ )] , dim=0 )
lowerCAmelCase__ = torch.cat(
[loaded[i][F'layers.{layer_i}.feed_forward.w2.weight'] for i in range(lowerCAmelCase_ )] , dim=1 )
lowerCAmelCase__ = torch.cat(
[loaded[i][F'layers.{layer_i}.feed_forward.w3.weight'] for i in range(lowerCAmelCase_ )] , dim=0 )
lowerCAmelCase__ = inv_freq
for k, v in state_dict.items():
lowerCAmelCase__ = filename
param_count += v.numel()
torch.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )
lowerCAmelCase__ = F'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
lowerCAmelCase__ = {
"model.embed_tokens.weight": loaded["tok_embeddings.weight"],
"model.norm.weight": loaded["norm.weight"],
"lm_head.weight": loaded["output.weight"],
}
else:
lowerCAmelCase__ = {
"model.norm.weight": loaded[0]["norm.weight"],
"model.embed_tokens.weight": torch.cat(
[loaded[i]["tok_embeddings.weight"] for i in range(lowerCAmelCase_ )] , dim=1 ),
"lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(lowerCAmelCase_ )] , dim=0 ),
}
for k, v in state_dict.items():
lowerCAmelCase__ = filename
param_count += v.numel()
torch.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )
# Write configs
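# total_size in the index file is measured in bytes; the weights are written as
# 16-bit floats, hence 2 bytes per parameter.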
lowerCAmelCase__ = {"total_size": param_count * 2}
write_json(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , "pytorch_model.bin.index.json" ) )
lowerCAmelCase__ = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
lowerCAmelCase__ = params["multiple_of"] if "multiple_of" in params else 256
lowerCAmelCase__ = LlamaConfig(
hidden_size=lowerCAmelCase_ , intermediate_size=compute_intermediate_size(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , num_attention_heads=params["n_heads"] , num_hidden_layers=params["n_layers"] , rms_norm_eps=params["norm_eps"] , num_key_value_heads=lowerCAmelCase_ , )
config.save_pretrained(lowerCAmelCase_ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("Loading the checkpoint in a Llama model." )
lowerCAmelCase__ = LlamaForCausalLM.from_pretrained(lowerCAmelCase_ , torch_dtype=torch.float16 , low_cpu_mem_usage=lowerCAmelCase_ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("Saving in the Transformers format." )
model.save_pretrained(lowerCAmelCase_ , safe_serialization=lowerCAmelCase_ )
shutil.rmtree(lowerCAmelCase_ )
def _A ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int ):
"""simple docstring"""
lowerCAmelCase__ = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(F'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
lowerCAmelCase__ = tokenizer_class(lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--input_dir" , help="Location of LLaMA weights, which contains tokenizer.model and model folders" , )
parser.add_argument(
"--model_size" , choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"] , )
parser.add_argument(
"--output_dir" , help="Location to write HF model and tokenizer" , )
parser.add_argument("--safe_serialization" , type=lowerCAmelCase_ , help="Whether or not to save using `safetensors`." )
lowerCAmelCase__ = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
lowerCAmelCase__ = os.path.join(args.input_dir , "tokenizer.model" )
write_tokenizer(args.output_dir , lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 61 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
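# Only the import structure above is built eagerly; the concrete classes are
# resolved on first attribute access via _LazyModule at the bottom of the file,
# so importing the package does not pull in torch unless it is actually used.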
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTop2Router,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 67 | 0 |
import os
from collections.abc import Iterator
def lowerCamelCase__ ( lowercase = "." ):
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(lowercase ):
SCREAMING_SNAKE_CASE : Tuple = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(lowercase )[1] in (".py", ".ipynb"):
yield os.path.join(lowercase , lowercase ).lstrip("./" )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return F'''{i * ' '}*''' if i else "\n##"
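# md_prefix above produces "\n##" for the top level (a new Markdown section) and
# e.g. " *" for i=1: one leading space per nesting level followed by a bullet
# (assuming the single-space indent written in the f-string).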
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(lowercase ) or old_parts[i] != new_part) and new_part:
print(F'''{md_prefix(lowercase )} {new_part.replace('_' , ' ' ).title()}''' )
return new_path
def lowerCamelCase__ ( lowercase = "." ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = ""
for filepath in sorted(good_file_paths(lowercase ) ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = os.path.split(lowercase )
if filepath != old_path:
SCREAMING_SNAKE_CASE : Dict = print_path(lowercase , lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = (filepath.count(os.sep ) + 1) if filepath else 0
SCREAMING_SNAKE_CASE : int = F'''{filepath}/{filename}'''.replace(" " , "%20" )
SCREAMING_SNAKE_CASE : Tuple = os.path.splitext(filename.replace("_" , " " ).title() )[0]
print(F'''{md_prefix(lowercase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md(""".""")
| 62 |
snake_case = {str(digit): digit**5 for digit in range(1_0)}
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> int:
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(snake_case__ ) )
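# Upper bound for the search: a d-digit number is at least 10 ** (d - 1), while
# its digit-power sum is at most d * 9 ** 5 = d * 59049; for d = 7 the largest
# possible sum, 7 * 59049 = 413343, is already smaller than the smallest
# seven-digit number, so the range below can safely stop at 100_0000.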
def SCREAMING_SNAKE_CASE__ ( ) -> int:
return sum(
number
for number in range(1000 , 100_0000 )
if number == digits_fifth_powers_sum(snake_case__ ) )
if __name__ == "__main__":
print(solution()) | 67 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 |
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> list[int]:
_lowercase = str(snake_case__ )
_lowercase = [n]
for i in range(1 , len(snake_case__ ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
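# Example: for 3797 this yields [3797, 797, 379, 97, 37, 7, 3], the number itself
# plus every truncation from the left and from the right.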
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> bool:
if len(str(snake_case__ ) ) > 3:
if not is_prime(int(str(snake_case__ )[-3:] ) ) or not is_prime(int(str(snake_case__ )[:3] ) ):
return False
return True
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int = 11 ) -> list[int]:
_lowercase = []
_lowercase = 13
while len(snake_case__ ) != count:
if validate(snake_case__ ):
_lowercase = list_truncated_nums(snake_case__ )
if all(is_prime(snake_case__ ) for i in list_nums ):
list_truncated_primes.append(snake_case__ )
num += 2
return list_truncated_primes
def SCREAMING_SNAKE_CASE__ ( ) -> int:
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(1_1)) = }""") | 67 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _lowerCamelCase ( UpperCamelCase_ ):
__a = "facebook/bart-large-mnli"
__a = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
__a = "text_classifier"
__a = AutoTokenizer
__a = AutoModelForSequenceClassification
__a = ["text", ["text"]]
__a = ["text"]
def UpperCamelCase_ ( self ) -> Optional[Any]:
super().setup()
SCREAMING_SNAKE_CASE__: str= self.model.config
SCREAMING_SNAKE_CASE__: Optional[Any]= -1
for idx, label in config.id2label.items():
if label.lower().startswith('''entail''' ):
SCREAMING_SNAKE_CASE__: Optional[Any]= int(idx )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
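# Classification is framed as zero-shot natural language inference: in encode
# below, every candidate label becomes the hypothesis "This example is {label}",
# and the label whose pair scores highest on the entailment class is returned.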
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Dict= labels
return self.pre_processor(
[text] * len(lowerCAmelCase ) , [f'This example is {label}' for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> List[Any]:
SCREAMING_SNAKE_CASE__: str= outputs.logits
SCREAMING_SNAKE_CASE__: Optional[int]= torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 64 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class A_ :
"""simple docstring"""
def __init__( self : Optional[Any] ,__A : Tuple ,__A : Any=99 ,__A : Any=13 ,__A : Dict=7 ,__A : List[Any]=9 ,__A : Dict=True ,__A : Any=True ,__A : Tuple=False ,__A : str=32 ,__A : int=5 ,__A : List[str]=4 ,__A : Optional[Any]=37 ,__A : int=8 ,__A : Any=0.1 ,__A : Dict=0.002 ,__A : Union[str, Any]=1 ,__A : Optional[Any]=0 ,__A : int=0 ,__A : Tuple=None ,__A : str=None ,) -> List[Any]:
_lowercase = parent
_lowercase = batch_size
_lowercase = encoder_seq_length
_lowercase = decoder_seq_length
# For common tests
_lowercase = self.decoder_seq_length
_lowercase = is_training
_lowercase = use_attention_mask
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = d_ff
_lowercase = relative_attention_num_buckets
_lowercase = dropout_rate
_lowercase = initializer_factor
_lowercase = eos_token_id
_lowercase = pad_token_id
_lowercase = decoder_start_token_id
_lowercase = None
_lowercase = decoder_layers
def __UpperCAmelCase ( self : Dict ) -> Dict:
return TaConfig.from_pretrained('google/umt5-base' )
def __UpperCAmelCase ( self : Optional[int] ,__A : Optional[int] ,__A : int ,__A : str ,__A : List[str]=None ,__A : List[str]=None ,__A : Any=None ,__A : List[Any]=None ,__A : str=None ,) -> Tuple:
if attention_mask is None:
_lowercase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_lowercase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_lowercase = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=__A )
if decoder_head_mask is None:
_lowercase = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=__A )
if cross_attn_head_mask is None:
_lowercase = torch.ones(
config.num_decoder_layers ,config.num_attention_heads ,device=__A )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
_lowercase = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
_lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
_lowercase = input_ids.clamp(self.pad_token_id + 1 )
_lowercase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_lowercase = self.get_config()
_lowercase = config.num_attention_heads
_lowercase = self.prepare_inputs_dict(__A ,__A ,__A )
return config, input_dict
def __UpperCAmelCase ( self : Dict ) -> str:
_lowercase , _lowercase = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self : Dict ) -> Tuple:
return TaConfig(
vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __UpperCAmelCase ( self : Dict ) -> Any:
return TaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __UpperCAmelCase ( self : Union[str, Any] ,__A : List[str] ,__A : Dict ,__A : List[str] ,__A : List[Any] ,__A : Tuple ,__A : int ,) -> Tuple:
_lowercase = UMTaModel(config=__A )
model.to(__A )
model.eval()
_lowercase = model(
input_ids=__A ,decoder_input_ids=__A ,attention_mask=__A ,decoder_attention_mask=__A ,)
_lowercase = model(input_ids=__A ,decoder_input_ids=__A )
_lowercase = result.last_hidden_state
_lowercase = result.past_key_values
_lowercase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__A ) ,config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) ,4 )
def __UpperCAmelCase ( self : List[Any] ,__A : Tuple ,__A : int ,__A : Any ,__A : Tuple ,__A : Any ,__A : Optional[int] ,) -> List[str]:
_lowercase = UMTaModel(config=__A ).get_decoder().to(__A ).eval()
# first forward pass
_lowercase = model(__A ,use_cache=__A )
_lowercase = model(__A )
_lowercase = model(__A ,use_cache=__A )
self.parent.assertTrue(len(__A ) == len(__A ) )
self.parent.assertTrue(len(__A ) == len(__A ) + 1 )
_lowercase , _lowercase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowercase = ids_tensor((self.batch_size, 1) ,config.vocab_size )
# append to next input_ids and
_lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
_lowercase = model(__A )['last_hidden_state']
_lowercase = model(__A ,past_key_values=__A )['last_hidden_state']
# select random slice
_lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_lowercase = output_from_no_past[:, -1, random_slice_idx].detach()
_lowercase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A ,__A ,atol=1e-3 ) )
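# The check above compares only one randomly chosen slice of the last hidden
# state: with and without the key/value cache the operations run in a different
# order, so bitwise equality is not expected and an absolute tolerance of 1e-3
# is used instead.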
def __UpperCAmelCase ( self : Any ,__A : List[str] ,__A : List[str] ,) -> int:
_lowercase = UMTaModel(config=__A ).to(__A ).half().eval()
_lowercase = model(**__A )['last_hidden_state']
self.parent.assertFalse(torch.isnan(__A ).any().item() )
@require_torch
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE_ : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : str = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE_ : Dict = [0.8, 0.9]
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
_lowercase = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def __UpperCAmelCase ( self : int ) -> str:
_lowercase = self.model_tester.prepare_config_and_inputs()
_lowercase = UMTaModel(config_and_inputs[0] ).to(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__A ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F"""{tmpdirname}/t5_test.onnx""" ,export_params=__A ,opset_version=9 ,input_names=['input_ids', 'decoder_input_ids'] ,)
@unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
def __UpperCAmelCase ( self : List[Any] ) -> str:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__A )
def __UpperCAmelCase ( self : List[str] ) -> int:
attention_names = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
_lowercase = self.model_tester.prepare_config_and_inputs()
_lowercase = config_and_inputs[0]
_lowercase = UMTaForConditionalGeneration(__A ).eval()
model.to(__A )
head_masking = {
'head_mask': torch.zeros(config.num_layers ,config.num_heads ,device=__A ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
}
for attn_name, (name, mask) in zip(attention_names ,head_masking.items() ):
_lowercase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_lowercase = torch.ones(
config.num_decoder_layers ,config.num_heads ,device=__A )
_lowercase = model.generate(
config_and_inputs[1]['input_ids'] ,num_beams=1 ,max_length=3 ,output_attentions=__A ,return_dict_in_generate=__A ,**__A ,)
# We check the state of decoder_attentions and cross_attentions just from the last step
_lowercase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def __UpperCAmelCase ( self : str ) -> List[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def __UpperCAmelCase ( self : int ) -> List[str]:
_lowercase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=__A ).to(__A )
_lowercase = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=__A ,legacy=__A )
_lowercase = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
_lowercase = tokenizer(__A ,return_tensors='pt' ,padding=__A ).input_ids
# fmt: off
_lowercase = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__A ,__A )
_lowercase = model.generate(input_ids.to(__A ) )
_lowercase = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
_lowercase = tokenizer.batch_decode(__A )
self.assertEqual(__A ,__A ) | 67 | 0 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : List[str] = np.full((len(__UpperCamelCase ), sequence_length, 2) , __UpperCamelCase )
else:
UpperCAmelCase__ : Any = np.full((len(__UpperCamelCase ), sequence_length) , __UpperCamelCase )
for i, tensor in enumerate(__UpperCamelCase ):
if padding_side == "right":
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : str = tensor[:sequence_length]
else:
UpperCAmelCase__ : str = tensor[:sequence_length]
else:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase__ : Union[str, Any] = tensor[:sequence_length]
return out_tensor.tolist()
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : str = ord(__UpperCamelCase )
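# The four ASCII ranges below cover !"#$%&'()*+,-./ (33-47), :;<=>?@ (58-64),
# [\]^_` (91-96) and {|}~ (123-126); anything outside them falls through to the
# Unicode category check, which treats every "P*" category as punctuation.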
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCAmelCase__ : Optional[Any] = unicodedata.category(__UpperCamelCase )
if cat.startswith("""P""" ):
return True
return False
@dataclass
class __lowercase ( __lowerCamelCase ):
snake_case_ = 42
snake_case_ = True
snake_case_ = None
snake_case_ = None
snake_case_ = -1_0_0
snake_case_ = "pt"
def __lowercase ( self : Dict ,A : List[str] ):
'''simple docstring'''
import torch
UpperCAmelCase__ : Dict = """label""" if """label""" in features[0].keys() else """labels"""
UpperCAmelCase__ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase__ : Any = self.tokenizer.pad(
A ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="""pt""" if labels is None else None ,)
if labels is None:
return batch
UpperCAmelCase__ : Union[str, Any] = torch.tensor(batch["""entity_ids"""] ).shape[1]
UpperCAmelCase__ : Union[str, Any] = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase__ : Optional[Any] = [
list(A ) + [self.label_pad_token_id] * (sequence_length - len(A )) for label in labels
]
else:
UpperCAmelCase__ : Tuple = [
[self.label_pad_token_id] * (sequence_length - len(A )) + list(A ) for label in labels
]
UpperCAmelCase__ : str = [feature["""ner_tags"""] for feature in features]
UpperCAmelCase__ : Any = padding_tensor(A ,-1 ,A ,A )
UpperCAmelCase__ : List[str] = [feature["""original_entity_spans"""] for feature in features]
UpperCAmelCase__ : Optional[int] = padding_tensor(A ,(-1, -1) ,A ,A )
UpperCAmelCase__ : Optional[int] = {k: torch.tensor(A ,dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 65 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='utf-8' ,check=__A ,)
assert hasattr(self ,'env' )
def __UpperCAmelCase ( self : str ,__A : Tuple ) -> int:
# configuration for running training on smdistributed Model Parallel
_lowercase = {
'enabled': True,
'processes_per_host': 8,
}
_lowercase = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
_lowercase = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
_lowercase = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,instance_count=__A ,instance_type=self.instance_type ,debugger_hook_config=__A ,hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 500,
} ,metric_definitions=self.env.metric_definitions ,distribution=__A ,py_version='py36' ,)
def __UpperCAmelCase ( self : List[Any] ,__A : Any ) -> Optional[Any]:
TrainingJobAnalytics(__A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __UpperCAmelCase ( self : Tuple ,__A : Union[str, Any] ) -> Optional[Any]:
# create estimator
_lowercase = self.create_estimator(__A )
# run training
estimator.fit()
# result dataframe
_lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
_lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowercase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" ,'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,__A ) | 67 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["PoolFormerFeatureExtractor"]
UpperCamelCase = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE_ : int = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] ,__A : List[Any]=5_0265 ,__A : str=512 ,__A : Optional[int]=8 ,__A : Any=2048 ,__A : Tuple=16 ,__A : str=8 ,__A : int=2048 ,__A : List[str]=16 ,__A : Optional[int]=0.0 ,__A : Any=0.0 ,__A : int=True ,__A : List[Any]=True ,__A : Tuple="gelu" ,__A : Any=512 ,__A : Dict=0.1 ,__A : Tuple=0.0 ,__A : int=0.0 ,__A : int=0.02 ,__A : Dict=1 ,__A : str=False ,__A : Dict=0 ,__A : Union[str, Any]=1 ,__A : Optional[int]=2 ,__A : List[str]=2 ,**__A : Tuple ,) -> Tuple:
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = d_model
_lowercase = encoder_ffn_dim
_lowercase = encoder_layers
_lowercase = encoder_attention_heads
_lowercase = decoder_ffn_dim
_lowercase = decoder_layers
_lowercase = decoder_attention_heads
_lowercase = dropout
_lowercase = attention_dropout
_lowercase = activation_dropout
_lowercase = activation_function
_lowercase = init_std
_lowercase = encoder_layerdrop
_lowercase = decoder_layerdrop
_lowercase = use_cache
_lowercase = encoder_layers
_lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,is_encoder_decoder=__A ,decoder_start_token_id=__A ,forced_eos_token_id=__A ,**__A ,)
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase = {0: 'batch'}
_lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__A ,direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def __UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super().outputs
else:
_lowercase = super(__A ,self ).outputs
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __UpperCAmelCase ( self : Optional[int] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
# Generate decoder inputs
_lowercase = seq_length if not self.use_past else 1
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
_lowercase = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_lowercase = dict(**__A ,**__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
_lowercase = common_inputs['decoder_input_ids'].shape[1]
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = decoder_seq_length + 3
_lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowercase = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__A ,__A )] ,dim=1 )
_lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowercase , _lowercase = self.num_layers
_lowercase = min(__A ,__A )
_lowercase = max(__A ,__A ) - min_num_layers
_lowercase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__A ):
common_inputs["past_key_values"].append(
(
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
) )
# TODO: test this.
_lowercase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__A ,__A ):
common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) )
return common_inputs
def __UpperCAmelCase ( self : List[Any] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowercase = seqlen + 2
_lowercase , _lowercase = self.num_layers
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = common_inputs['attention_mask'].dtype
_lowercase = torch.cat(
[common_inputs['attention_mask'], torch.ones(__A ,__A ,dtype=__A )] ,dim=1 )
_lowercase = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A )
]
return common_inputs
def __UpperCAmelCase ( self : Any ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowercase = tokenizer.num_special_tokens_to_add(__A )
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__A )
# Generate dummy inputs according to compute batch and sequence
_lowercase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowercase = dict(tokenizer(__A ,return_tensors=__A ) )
return common_inputs
def __UpperCAmelCase ( self : Dict ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
elif self.task == "causal-lm":
_lowercase = self._generate_dummy_inputs_for_causal_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
else:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
return common_inputs
def __UpperCAmelCase ( self : List[str] ,__A : Dict ,__A : Any ,__A : List[Any] ,__A : Tuple ) -> Union[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super()._flatten_past_key_values_(__A ,__A ,__A ,__A )
else:
_lowercase = super(__A ,self )._flatten_past_key_values_(
__A ,__A ,__A ,__A ) | 67 | 0 |
import numpy as np
def lowercase__ ( A_: np.ndarray ) -> np.ndarray:
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
def lowercase__ ( A_: np.ndarray ) -> np.ndarray:
"""simple docstring"""
return vector * sigmoid(A_ )
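# Example values: sigmoid(np.array([0.0])) is [0.5], so the sigmoid linear unit
# (swish) of 0 is 0 * 0.5 = 0; for large positive inputs it approaches the
# identity, and for large negative inputs it approaches 0.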
if __name__ == "__main__":
import doctest
doctest.testmod()
| 68 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : Any ) -> str:
torch.manual_seed(0 )
_lowercase = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
_lowercase = self.dummy_uncond_unet
_lowercase = KarrasVeScheduler()
_lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ).images
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ,return_dict=__A )[0]
_lowercase = image[0, -3:, -3:, -1]
_lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
_lowercase = 'google/ncsnpp-celebahq-256'
_lowercase = UNetaDModel.from_pretrained(__A )
_lowercase = KarrasVeScheduler()
_lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=20 ,generator=__A ,output_type='numpy' ).images
_lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowercase = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 67 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> str:
if isinstance(_UpperCAmelCase , float ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if isinstance(_UpperCAmelCase , str ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
__snake_case = False
if num < 0:
__snake_case = True
__snake_case = -num
__snake_case = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_UpperCAmelCase ) for e in binary )
return "0b" + "".join(str(_UpperCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 69 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str , snake_case__ :str ) -> list:
_lowercase = len(snake_case__ )
_lowercase = []
for i in range(len(snake_case__ ) - pat_len + 1 ):
_lowercase = True
for j in range(snake_case__ ):
if s[i + j] != pattern[j]:
_lowercase = False
break
if match_found:
position.append(snake_case__ )
return position
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC""")) | 67 | 0 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = str(lowercase )
return len(lowercase ) == 9 and set(lowercase ) == set('123456789' )
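# For a four-digit base n with 5000 <= n <= 9999, 2n has five digits, so the
# concatenated product of n with (1, 2) equals n * 10**5 + 2n = 100002 * n.
# For a three-digit base with 100 <= n <= 333, 2n and 3n have three digits each,
# giving n * 10**6 + 2n * 10**3 + 3n = 1002003 * n. The two loops below scan
# exactly those families from the largest candidate downwards.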
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
for base_num in range(99_99 , 49_99 , -1 ):
lowerCamelCase_ = 10_00_02 * base_num
if is_9_pandigital(lowercase ):
return candidate
for base_num in range(3_33 , 99 , -1 ):
lowerCamelCase_ = 1_00_20_03 * base_num
if is_9_pandigital(lowercase ):
return candidate
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 70 |
from typing import Any
import numpy as np
def SCREAMING_SNAKE_CASE__ ( snake_case__ :np.ndarray ) -> bool:
return np.array_equal(snake_case__ , matrix.conjugate().T )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :np.ndarray , snake_case__ :np.ndarray ) -> Any:
_lowercase = v.conjugate().T
_lowercase = v_star.dot(snake_case__ )
assert isinstance(snake_case__ , np.ndarray )
return (v_star_dot.dot(snake_case__ )) / (v_star.dot(snake_case__ ))
def SCREAMING_SNAKE_CASE__ ( ) -> None:
_lowercase = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
_lowercase = np.array([[1], [2], [3]] )
assert is_hermitian(snake_case__ ), F"""{a} is not hermitian."""
print(rayleigh_quotient(snake_case__ , snake_case__ ) )
_lowercase = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(snake_case__ ), F"""{a} is not hermitian."""
assert rayleigh_quotient(snake_case__ , snake_case__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests() | 67 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 71 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple ,__A : Dict ,__A : List[Any]=7 ,__A : Dict=3 ,__A : Tuple=30 ,__A : Dict=400 ,__A : Any=True ,__A : List[Any]=None ,__A : Any=True ,__A : List[str]=[0.5, 0.5, 0.5] ,__A : Union[str, Any]=[0.5, 0.5, 0.5] ,__A : int=True ,__A : List[str]=1 / 255 ,__A : Union[str, Any]=True ,) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowercase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
_lowercase = parent
_lowercase = batch_size
_lowercase = num_channels
_lowercase = min_resolution
_lowercase = max_resolution
_lowercase = do_resize
_lowercase = size
_lowercase = do_normalize
_lowercase = image_mean
_lowercase = image_std
_lowercase = do_rescale
_lowercase = rescale_factor
_lowercase = do_pad
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCAmelCase ( self : Tuple ,__A : Union[str, Any] ,__A : List[str]=False ) -> Union[str, Any]:
if not batched:
_lowercase = image_inputs[0]
if isinstance(__A ,Image.Image ):
_lowercase , _lowercase = image.size
else:
_lowercase , _lowercase = image.shape[1], image.shape[2]
if w < h:
_lowercase = int(self.size['shortest_edge'] * h / w )
_lowercase = self.size['shortest_edge']
elif w > h:
_lowercase = self.size['shortest_edge']
_lowercase = int(self.size['shortest_edge'] * w / h )
else:
_lowercase = self.size['shortest_edge']
_lowercase = self.size['shortest_edge']
else:
_lowercase = []
for image in image_inputs:
_lowercase , _lowercase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowercase = max(__A ,key=lambda __A : item[0] )[0]
_lowercase = max(__A ,key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A_ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
_lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A ,'image_mean' ) )
self.assertTrue(hasattr(__A ,'image_std' ) )
self.assertTrue(hasattr(__A ,'do_normalize' ) )
self.assertTrue(hasattr(__A ,'do_resize' ) )
self.assertTrue(hasattr(__A ,'do_rescale' ) )
self.assertTrue(hasattr(__A ,'do_pad' ) )
self.assertTrue(hasattr(__A ,'size' ) )
def __UpperCAmelCase ( self : str ) -> List[str]:
_lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad ,__A )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
pass
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A ,Image.Image )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,np.ndarray )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,torch.Tensor )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
# prepare image and target
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'image_id': 3_9769, 'annotations': target}
# encode them
_lowercase = DetaImageProcessor()
_lowercase = image_processing(images=__A ,annotations=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# prepare image, target and masks_path
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
_lowercase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_lowercase = DetaImageProcessor(format='coco_panoptic' )
_lowercase = image_processing(images=__A ,annotations=__A ,masks_path=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify masks
_lowercase = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,__A )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) ) | 67 | 0 |
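# --- Illustrative sketch (not from the dataset row above; names are ours) ----
# The DETA tests above recompute the aspect-preserving "shortest edge" resize:
# the shorter image side is scaled to `shortest_edge` and the longer side by
# the same ratio. A minimal standalone version of that rule:
def shortest_edge_resize(height: int, width: int, shortest_edge: int = 18) -> tuple:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

assert shortest_edge_resize(30, 400) == (18, 240)
assert shortest_edge_resize(400, 30) == (240, 18)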
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self , snake_case_ , snake_case_=2 , snake_case_=True , snake_case_=False , snake_case_=10 , snake_case_=3 , snake_case_=32 * 4 , snake_case_=32 * 6 , snake_case_=4 , snake_case_=32 , ):
lowercase =parent
lowercase =batch_size
lowercase =is_training
lowercase =use_auxiliary_loss
lowercase =num_queries
lowercase =num_channels
lowercase =min_size
lowercase =max_size
lowercase =num_labels
lowercase =mask_feature_size
def _A( self ):
lowercase =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
snake_case_ )
lowercase =torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ )
lowercase =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5
).float()
lowercase =(torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long()
lowercase =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _A( self ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def _A( self ):
lowercase , lowercase , lowercase , lowercase , lowercase =self.prepare_config_and_inputs()
lowercase ={'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def _A( self , snake_case_ , snake_case_ ):
lowercase =output.encoder_hidden_states
lowercase =output.pixel_decoder_hidden_states
lowercase =output.transformer_decoder_hidden_states
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , config.decoder_config.decoder_layers )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=False ):
with torch.no_grad():
lowercase =MaskFormerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
lowercase =model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
lowercase =model(snake_case_ , output_hidden_states=snake_case_ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(snake_case_ , snake_case_ )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
lowercase =MaskFormerForInstanceSegmentation(config=snake_case_ )
model.to(snake_case_ )
model.eval()
def comm_check_on_output(snake_case_ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowercase =model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
lowercase =model(snake_case_ )
comm_check_on_output(snake_case_ )
lowercase =model(
pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
comm_check_on_output(snake_case_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __magic_name__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCamelCase__ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCamelCase__ = (
{'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def _A( self ):
lowercase =MaskFormerModelTester(self )
lowercase =ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def _A( self ):
self.config_tester.run_common_tests()
def _A( self ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def _A( self ):
lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case_ )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def _A( self ):
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def _A( self ):
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def _A( self ):
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def _A( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def _A( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _A( self ):
pass
def _A( self ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase =model_class(snake_case_ )
lowercase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase =[*signature.parameters.keys()]
lowercase =['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case_ )
@slow
def _A( self ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowercase =MaskFormerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _A( self ):
lowercase =(self.model_tester.min_size,) * 2
lowercase ={
'''pixel_values''': torch.randn((2, 3, *size) , device=snake_case_ ),
'''mask_labels''': torch.randn((2, 10, *size) , device=snake_case_ ),
'''class_labels''': torch.zeros(2 , 10 , device=snake_case_ ).long(),
}
lowercase =MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ )
lowercase =model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
def _A( self ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def _A( self ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase =model_class(snake_case_ ).to(snake_case_ )
lowercase =model(**snake_case_ , output_attentions=snake_case_ )
self.assertTrue(outputs.attentions is not None )
def _A( self ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowercase =self.all_model_classes[1]
lowercase , lowercase , lowercase , lowercase , lowercase =self.model_tester.prepare_config_and_inputs()
lowercase =model_class(snake_case_ )
model.to(snake_case_ )
model.train()
lowercase =model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss
loss.backward()
def _A( self ):
# only MaskFormerForInstanceSegmentation has the loss
lowercase =self.all_model_classes[1]
lowercase , lowercase , lowercase , lowercase , lowercase =self.model_tester.prepare_config_and_inputs()
lowercase =True
lowercase =True
lowercase =model_class(snake_case_ )
model.to(snake_case_ )
model.train()
lowercase =model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
lowercase =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
lowercase =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_UpperCAmelCase : List[Any] = 1e-4
def UpperCamelCase ( ) -> Optional[Any]:
'''simple docstring'''
lowercase =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __magic_name__ ( unittest.TestCase ):
@cached_property
def _A( self ):
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def _A( self ):
lowercase =MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(snake_case_ )
lowercase =self.default_image_processor
lowercase =prepare_img()
lowercase =image_processor(snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
lowercase =inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
lowercase =model(**snake_case_ )
lowercase =torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
lowercase =torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
lowercase =torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def _A( self ):
lowercase =(
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(snake_case_ )
.eval()
)
lowercase =self.default_image_processor
lowercase =prepare_img()
lowercase =image_processor(snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
lowercase =inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
lowercase =model(**snake_case_ )
# masks_queries_logits
lowercase =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowercase =[
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
lowercase =torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
lowercase =outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowercase =torch.tensor(
[
[1.6_512E00, -5.2_572E00, -3.3_519E00],
[3.6_169E-02, -5.9_025E00, -2.9_313E00],
[1.0_766E-04, -7.7_630E00, -5.1_263E00],
] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def _A( self ):
lowercase =(
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(snake_case_ )
.eval()
)
lowercase =self.default_image_processor
lowercase =prepare_img()
lowercase =image_processor(snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
lowercase =inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
lowercase =model(**snake_case_ )
# masks_queries_logits
lowercase =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowercase =[[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
lowercase =torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
lowercase =outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowercase =torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def _A( self ):
lowercase =(
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(snake_case_ )
.eval()
)
lowercase =self.default_image_processor
lowercase =image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , )
lowercase =inputs['''pixel_values'''].to(snake_case_ )
lowercase =[el.to(snake_case_ ) for el in inputs['''mask_labels''']]
lowercase =[el.to(snake_case_ ) for el in inputs['''class_labels''']]
with torch.no_grad():
lowercase =model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
| 72 |
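# --- Illustrative sketch (toy helper, names are ours) -------------------------
# Shape bookkeeping the MaskFormer tests above assert on: mask logits come out
# at 1/4 of the input resolution (encoder compression) and class logits carry
# one extra "no object" slot.
def maskformer_output_shapes(batch, queries, num_labels, height, width):
    masks_queries_logits = (batch, queries, height // 4, width // 4)
    class_queries_logits = (batch, queries, num_labels + 1)
    return masks_queries_logits, class_queries_logits

assert maskformer_output_shapes(2, 10, 4, 128, 192) == ((2, 10, 32, 48), (2, 10, 5))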
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
snake_case = False
snake_case = False
def train_command_factory(snake_case__ :Namespace ) -> "TrainCommand":
    return TrainCommand(snake_case__ )
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def __UpperCAmelCase ( __A : ArgumentParser ) -> List[Any]:
_lowercase = parser.add_parser('train' ,help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' ,type=__A ,required=__A ,help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' ,)
train_parser.add_argument(
'--column_label' ,type=__A ,default=0 ,help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' ,type=__A ,default=1 ,help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' ,type=__A ,default=2 ,help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' ,action='store_true' ,help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' ,type=__A ,default='' ,help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' ,type=__A ,default=0.1 ,help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' ,)
train_parser.add_argument('--output' ,type=__A ,default='./' ,help='path to saved the trained model.' )
train_parser.add_argument(
'--task' ,type=__A ,default='text_classification' ,help='Task to train the model on.' )
train_parser.add_argument(
'--model' ,type=__A ,default='bert-base-uncased' ,help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' ,type=__A ,default=32 ,help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' ,type=__A ,default=64 ,help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' ,type=__A ,default=3e-5 ,help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' ,type=__A ,default=1e-08 ,help='Epsilon for Adam optimizer.' )
        train_parser.set_defaults(func=train_command_factory )
def __init__( self : Optional[Any] ,__A : Namespace ) -> Tuple:
_lowercase = logging.get_logger('transformers-cli/training' )
_lowercase = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output ,exist_ok=__A )
_lowercase = args.output
_lowercase = args.column_label
_lowercase = args.column_text
_lowercase = args.column_id
self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
_lowercase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"""Loading dataset from {args.train_data}""" )
_lowercase = Processor.create_from_csv(
args.train_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
_lowercase = None
if args.validation_data:
self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
_lowercase = Processor.create_from_csv(
args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
_lowercase = args.validation_split
_lowercase = args.train_batch_size
_lowercase = args.valid_batch_size
_lowercase = args.learning_rate
_lowercase = args.adam_epsilon
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
raise NotImplementedError
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
self.pipeline.fit(
self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,)
# Save trained pipeline
self.pipeline.save_pretrained(self.output ) | 67 | 0 |
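# --- Illustrative sketch: the subcommand pattern used by TrainCommand above --
# A static method hangs a parser on the CLI and binds a factory through
# set_defaults(func=...), so args.func(args) builds the command object.
# Toy command; all names here are ours.
from argparse import ArgumentParser, Namespace

class EchoCommand:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("echo", help="Print a message.")
        sub.add_argument("--message", type=str, default="hi")
        sub.set_defaults(func=lambda args: EchoCommand(args))

    def __init__(self, args: Namespace):
        self.message = args.message

    def run(self):
        print(self.message)

parser = ArgumentParser("demo")
EchoCommand.register_subcommand(parser.add_subparsers())
args = parser.parse_args(["echo", "--message", "hello"])
args.func(args).run()  # prints "hello"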
def solution(limit=5000_0000):
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    # sieve of Eratosthenes over the odd candidates up to sqrt(limit - 24)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    # iterate in ascending order so the early `break`s below are sound
    sorted_primes = sorted(primes)
    for prime_a in sorted_primes:
        square = prime_a * prime_a
        for prime_b in sorted_primes:
            cube = prime_b * prime_b * prime_b
            if square + cube >= limit - 16:  # 16 == 2**4, the smallest fourth power
                break
            for prime_c in sorted_primes:
                tetr = prime_c * prime_c * prime_c * prime_c
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 73 |
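# --- Illustrative sanity check (our helper, not from the snippet above) -------
# Brute-force companion to the Euler-87-style `solution` above: collect
# p**2 + q**3 + r**4 totals below a small limit into a set. Distinct prime
# triples can collide on the same total, which is why a set is needed.
def brute_force(limit: int) -> int:
    primes = [
        p
        for p in range(2, int(limit ** 0.5) + 1)
        if all(p % d for d in range(2, int(p ** 0.5) + 1))
    ]
    totals = {
        a ** 2 + b ** 3 + c ** 4
        for a in primes
        for b in primes
        for c in primes
        if a ** 2 + b ** 3 + c ** 4 < limit
    }
    return len(totals)

assert brute_force(50) == 4  # {28, 33, 47, 49}; matches solution(50)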
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any ) -> str:
_lowercase = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
_lowercase = 1024
_lowercase = 4096
_lowercase = 24
_lowercase = 16
_lowercase = [5, 11, 17, 23]
_lowercase = [256, 512, 1024, 1024]
_lowercase = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = [256, 512, 768, 768]
_lowercase = 150
_lowercase = 16
_lowercase = (1, 384, 384)
_lowercase = False
_lowercase = 'project'
if "ade" in checkpoint_url:
_lowercase = True
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = 150
_lowercase = 16
_lowercase = 'huggingface/label-files'
_lowercase = 'ade20k-id2label.json'
_lowercase = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='dataset' ) ) , 'r' ) )
_lowercase = {int(snake_case__ ): v for k, v in idalabel.items()}
_lowercase = idalabel
_lowercase = {v: k for k, v in idalabel.items()}
_lowercase = [1, 150, 480, 480]
return config, expected_shape
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> str:
_lowercase = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[Any] ) -> Any:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowercase = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
_lowercase = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
_lowercase = name.replace('patch_embed' , '' )
if "pos_embed" in name:
_lowercase = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
_lowercase = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
_lowercase = name.replace('proj' , 'projection' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
_lowercase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
_lowercase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
_lowercase = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
_lowercase = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
_lowercase = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
_lowercase = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
_lowercase = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
_lowercase = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
_lowercase = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
_lowercase = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_lowercase = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
_lowercase = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_lowercase = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_lowercase = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_lowercase = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_lowercase = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowercase = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_lowercase = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_lowercase = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_lowercase = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_lowercase = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_lowercase = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_lowercase = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_lowercase = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_lowercase = name.replace('bn' , 'batch_norm' )
if "head" in name:
_lowercase = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_lowercase = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_lowercase = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
_lowercase = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
_lowercase = name.replace('..' , '.' )
if "stem.conv" in name:
_lowercase = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
_lowercase = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
_lowercase = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
_lowercase = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
_lowercase = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
_lowercase = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] , snake_case__ :int ) -> Dict:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowercase = in_proj_weight[: config.hidden_size, :]
_lowercase = in_proj_bias[: config.hidden_size]
_lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase = in_proj_weight[
-config.hidden_size :, :
]
_lowercase = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
_lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :List[Any] , snake_case__ :str , snake_case__ :Any , snake_case__ :List[str] ) -> str:
_lowercase , _lowercase = get_dpt_config(snake_case__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_lowercase = torch.load(snake_case__ , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(snake_case__ )
# rename keys
for key in state_dict.copy().keys():
_lowercase = state_dict.pop(snake_case__ )
_lowercase = val
# read in qkv matrices
read_in_q_k_v(snake_case__ , snake_case__ )
# load HuggingFace model
_lowercase = DPTForSemanticSegmentation(snake_case__ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# Check outputs on an image
_lowercase = 480 if 'ade' in checkpoint_url else 384
_lowercase = DPTImageProcessor(size=snake_case__ )
_lowercase = prepare_img()
_lowercase = image_processor(snake_case__ , return_tensors='pt' )
# forward pass
_lowercase = model(**snake_case__ ).logits if 'ade' in checkpoint_url else model(**snake_case__ ).predicted_depth
if show_prediction:
_lowercase = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=snake_case__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
snake_case = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
) | 67 | 0 |
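# --- Illustrative sketch (plain NumPy stand-in, names are ours) ---------------
# The fused-QKV split performed by read_in_q_k_v above: timm stores one
# (3*hidden, hidden) projection, while the HF checkpoint wants separate
# query/key/value matrices taken from consecutive row blocks.
import numpy as np

hidden = 4
in_proj_weight = np.arange(3 * hidden * hidden).reshape(3 * hidden, hidden)
query = in_proj_weight[:hidden, :]
key = in_proj_weight[hidden : 2 * hidden, :]
value = in_proj_weight[-hidden:, :]
assert query.shape == key.shape == value.shape == (hidden, hidden)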
import os
import sys
import unittest
lowercase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowercase_ = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
lowercase_ = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = get_test_to_tester_mapping(_A )
__SCREAMING_SNAKE_CASE : int = get_test_to_tester_mapping(_A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'''BertModelTest''': '''BertModelTester'''}
__SCREAMING_SNAKE_CASE : List[str] = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(_A ) , _A )
self.assertEqual(get_test_info.to_json(_A ) , _A )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = get_model_to_test_mapping(_A )
__SCREAMING_SNAKE_CASE : Dict = get_model_to_test_mapping(_A )
__SCREAMING_SNAKE_CASE : Dict = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(_A ) , _A )
self.assertEqual(get_test_info.to_json(_A ) , _A )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = get_model_to_tester_mapping(_A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = get_model_to_tester_mapping(_A )
__SCREAMING_SNAKE_CASE : int = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(_A ) , _A )
self.assertEqual(get_test_info.to_json(_A ) , _A )
| 74 |
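# --- Illustrative sketch (assumed toy data, not the real introspection) -------
# The model -> tester mapping checked above composes two tables: model -> test
# classes and test class -> tester class.
test_to_tester = {"BertModelTest": "BertModelTester"}
model_to_test = {"BertModel": ["BertModelTest"], "BertForMaskedLM": ["BertModelTest"]}

model_to_tester = {
    model: [test_to_tester[test] for test in tests]
    for model, tests in model_to_test.items()
}
assert model_to_tester["BertModel"] == ["BertModelTester"]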
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 67 | 0 |
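# --- Illustrative sketch (toy stand-in, not the real transformers _LazyModule)
# The lazy-import pattern above: attribute access triggers the actual import,
# so optional backends are only loaded when first used.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._name_to_module[attr])
        return getattr(module, attr)

lazy_math = LazyModule("lazy_math", {"math": ["sqrt"]})
print(lazy_math.sqrt(9.0))  # 3.0 -- `math` is only imported on first access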
'''simple docstring'''
def heaps(arr) -> list:
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(n, arr):
        # Heap's iterative algorithm: c[] plays the role of the recursion's
        # loop counters; each swap yields exactly one new permutation.
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    print(heaps(arr))
| 75 |
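# --- Usage check for the `heaps` function in the row above (same scope) -------
# Heap's algorithm must emit exactly n! distinct permutations; cross-check
# against itertools on a small input:
from itertools import permutations

assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))
assert len(heaps([1, 2, 3, 4])) == 24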
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline | 67 | 0 |
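# --- Illustrative sketch (toy module/class names are ours) ---------------------
# The guarded-import pattern above: if the optional backend is missing, export
# a dummy that fails loudly at instantiation time, so importing the package
# itself never breaks.
try:
    import nonexistent_backend  # stand-in for torch/transformers
except ImportError:
    class UniDiffuserPipeline:
        def __init__(self, *args, **kwargs):
            raise ImportError("UniDiffuserPipeline requires the missing backend.")
else:
    from nonexistent_backend import UniDiffuserPipeline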
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'sentencepiece.bpe.model'}
a_ = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
a_ = {
'moussaKam/mbarthez': 1_0_2_4,
'moussaKam/barthez': 1_0_2_4,
'moussaKam/barthez-orangesum-title': 1_0_2_4,
}
a_ = '▁'
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =VOCAB_FILES_NAMES
UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
__lowercase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
__lowercase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__lowercase : Optional[Any] = vocab_file
__lowercase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
__lowercase : List[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
__lowercase : str = len(self.sp_model ) - 1
__lowercase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase : int = [self.cls_token_id]
__lowercase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]:
__lowercase : Any = [self.sep_token_id]
__lowercase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCamelCase ( self ) -> Optional[Any]:
return len(self.sp_model )
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : List[str] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]:
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowercase : Optional[Any] = self.sp_model.PieceToId(UpperCamelCase_ )
return spm_id if spm_id else self.unk_token_id
def _lowerCamelCase ( self , UpperCamelCase_ ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> int:
__lowercase : Union[str, Any] = []
__lowercase : int = ''''''
__lowercase : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase_ ) + token
__lowercase : List[str] = True
__lowercase : Optional[int] = []
else:
current_sub_tokens.append(UpperCamelCase_ )
__lowercase : int = False
out_string += self.sp_model.decode(UpperCamelCase_ )
return out_string.strip()
def __getstate__( self ) -> Any:
__lowercase : Optional[Any] = self.__dict__.copy()
__lowercase : Optional[int] = None
return state
def __setstate__( self , UpperCamelCase_ ) -> Tuple:
__lowercase : str = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__lowercase : Any = {}
__lowercase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase : Tuple = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , '''wb''' ) as fi:
__lowercase : Any = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
| 76 |
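# --- Illustrative sketch (toy ids: <s>=0, </s>=2; helper name is ours) --------
# The special-token layout build_inputs_with_special_tokens implements above
# (BARThez follows the RoBERTa convention): <s> A </s> for one sequence and
# <s> A </s></s> B </s> for a pair.
def with_special_tokens(ids_a, ids_b=None, cls=0, sep=2):
    if ids_b is None:
        return [cls] + ids_a + [sep]
    return [cls] + ids_a + [sep, sep] + ids_b + [sep]

assert with_special_tokens([7, 8]) == [0, 7, 8, 2]
assert with_special_tokens([7], [9]) == [0, 7, 2, 2, 9, 2]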
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = '''transfo-xl'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''mems''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[Any] ,__A : Union[str, Any]=26_7735 ,__A : List[Any]=[2_0000, 4_0000, 20_0000] ,__A : Dict=1024 ,__A : str=1024 ,__A : Dict=16 ,__A : int=64 ,__A : Dict=4096 ,__A : List[Any]=4 ,__A : Optional[int]=False ,__A : Union[str, Any]=18 ,__A : Tuple=1600 ,__A : str=1000 ,__A : Dict=True ,__A : Dict=True ,__A : int=0 ,__A : Optional[int]=-1 ,__A : int=True ,__A : List[str]=0.1 ,__A : Optional[int]=0.0 ,__A : str=True ,__A : Tuple="normal" ,__A : Union[str, Any]=0.01 ,__A : Tuple=0.01 ,__A : Any=0.02 ,__A : Union[str, Any]=1e-5 ,__A : List[Any]=0 ,**__A : str ,) -> List[Any]:
_lowercase = vocab_size
_lowercase = []
self.cutoffs.extend(__A )
if proj_share_all_but_first:
_lowercase = [False] + [True] * len(self.cutoffs )
else:
_lowercase = [False] + [False] * len(self.cutoffs )
_lowercase = d_model
_lowercase = d_embed
_lowercase = d_head
_lowercase = d_inner
_lowercase = div_val
_lowercase = pre_lnorm
_lowercase = n_layer
_lowercase = n_head
_lowercase = mem_len
_lowercase = same_length
_lowercase = attn_type
_lowercase = clamp_len
_lowercase = sample_softmax
_lowercase = adaptive
_lowercase = dropout
_lowercase = dropatt
_lowercase = untie_r
_lowercase = init
_lowercase = init_range
_lowercase = proj_init_std
_lowercase = init_std
_lowercase = layer_norm_epsilon
super().__init__(eos_token_id=__A ,**__A )
@property
def __UpperCAmelCase ( self : str ) -> Optional[int]:
# Message copied from Transformer-XL documentation
logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def __UpperCAmelCase ( self : Any ,__A : Dict ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) | 67 | 0 |
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave two strings character by character."""
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
| 77 |
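# --- Usage check for the interleaver in the row above (same scope) -----------
assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"
assert alternative_string_arrange("", "abc") == "abc"
assert alternative_string_arrange("abc", "") == "abc"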
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''dpr'''
def __init__( self : int ,__A : Union[str, Any]=3_0522 ,__A : Optional[int]=768 ,__A : int=12 ,__A : List[Any]=12 ,__A : Optional[Any]=3072 ,__A : Union[str, Any]="gelu" ,__A : Union[str, Any]=0.1 ,__A : List[Any]=0.1 ,__A : str=512 ,__A : List[str]=2 ,__A : Tuple=0.02 ,__A : Tuple=1e-12 ,__A : List[Any]=0 ,__A : List[str]="absolute" ,__A : int = 0 ,**__A : int ,) -> Tuple:
super().__init__(pad_token_id=__A ,**__A )
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = hidden_act
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = initializer_range
_lowercase = layer_norm_eps
_lowercase = projection_dim
_lowercase = position_embedding_type | 67 | 0 |
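# --- Illustrative sketch (toy helper, names are ours) -------------------------
# What projection_dim=0 means in the DPR config above: the encoder's pooled
# output is used as-is; a nonzero value adds a Linear(hidden_size,
# projection_dim) head, so the embedding dimension changes.
def dpr_embedding_dim(hidden_size: int, projection_dim: int) -> int:
    return projection_dim if projection_dim > 0 else hidden_size

assert dpr_embedding_dim(768, 0) == 768
assert dpr_embedding_dim(768, 128) == 128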
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 78 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
snake_case = Lock()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :Union[str, Any] , snake_case__ :Tuple , snake_case__ :Any , snake_case__ :Dict , snake_case__ :Optional[int] , snake_case__ :List[str] ) -> Optional[Any]:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
_lowercase = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
_lowercase = min(snake_case__ , snake_case__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
_lowercase = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
_lowercase = max(snake_case__ , snake_case__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Dict:
_lowercase = []
_lowercase = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
_lowercase = Pipe()
_lowercase = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
_lowercase = temp_rs
_lowercase = temp_rr
for i in range(1 , len(snake_case__ ) - 1 ):
_lowercase = Pipe()
_lowercase = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
_lowercase = temp_rs
_lowercase = temp_rr
process_array_.append(
Process(
target=snake_case__ , args=(
len(snake_case__ ) - 1,
arr[len(snake_case__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case__ ) ):
_lowercase = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
_lowercase = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*snake_case__ )
_lowercase = odd_even_transposition(snake_case__ )
print('Sorted List\n' )
print(*snake_case__ )
if __name__ == "__main__":
main() | 67 | 0 |
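# --- Illustrative sketch (added; not part of the dataset row above) ---
# Odd-even transposition sort runs n rounds of alternating compare-and-swap passes;
# the multiprocessing version above runs one process per element and synchronizes
# neighbours through pipes. The same schedule, single-threaded, for reference:
def odd_even_transposition_sequential(values: list) -> list:
    n = len(values)
    for round_number in range(n):
        # even rounds pair (0,1), (2,3), ...; odd rounds pair (1,2), (3,4), ...
        for i in range(round_number % 2, n - 1, 2):
            if values[i] > values[i + 1]:
                values[i], values[i + 1] = values[i + 1], values[i]
    return values
assert odd_even_transposition_sequential([5, 3, 8, 1, 9, 2]) == [1, 2, 3, 5, 8, 9]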
SCREAMING_SNAKE_CASE__ : Dict = 6_55_21
def _lowerCamelCase ( __lowerCamelCase ) -> int:
'''simple docstring'''
UpperCAmelCase__ : str = 1
UpperCAmelCase__ : Tuple = 0
for plain_chr in plain_text:
UpperCAmelCase__ : int = (a + ord(__lowerCamelCase )) % MOD_ADLER
UpperCAmelCase__ : List[str] = (b + a) % MOD_ADLER
return (b << 16) | a
| 79 |
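# --- Illustrative check (added; not part of the dataset row above) ---
# The routine above is the Adler-32 checksum; 65521 is the largest prime below 2**16.
# A byte-oriented reference, validated against the standard library:
import zlib
def adler32_reference(data: bytes) -> int:
    a, b = 1, 0
    for byte in data:
        a = (a + byte) % 65_521
        b = (b + a) % 65_521
    return (b << 16) | a
payload = b"Wikipedia"
assert adler32_reference(payload) == zlib.adler32(payload) == 0x11E60398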
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''big_bird'''
def __init__( self : str ,__A : Union[str, Any]=5_0358 ,__A : Any=768 ,__A : List[str]=12 ,__A : Union[str, Any]=12 ,__A : int=3072 ,__A : Tuple="gelu_new" ,__A : Any=0.1 ,__A : Optional[Any]=0.1 ,__A : Tuple=4096 ,__A : int=2 ,__A : Union[str, Any]=0.02 ,__A : Optional[int]=1e-12 ,__A : List[str]=True ,__A : List[Any]=0 ,__A : Optional[Any]=1 ,__A : Optional[int]=2 ,__A : Optional[int]=66 ,__A : Tuple="block_sparse" ,__A : Optional[int]=True ,__A : Optional[int]=False ,__A : Tuple=64 ,__A : str=3 ,__A : Optional[int]=None ,**__A : Dict ,) -> Union[str, Any]:
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,sep_token_id=__A ,**__A ,)
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = initializer_range
_lowercase = type_vocab_size
_lowercase = layer_norm_eps
_lowercase = use_cache
_lowercase = rescale_embeddings
_lowercase = attention_type
_lowercase = use_bias
_lowercase = block_size
_lowercase = num_random_blocks
_lowercase = classifier_dropout
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowercase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 67 | 0 |
from __future__ import annotations
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
__lowercase , __lowercase = array[indexa], array[indexa]
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if length > 1:
__lowercase = int(length / 2 )
for i in range(lowerCamelCase , low + middle ):
comp_and_swap(lowerCamelCase , lowerCamelCase , i + middle , lowerCamelCase )
bitonic_merge(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
bitonic_merge(lowerCamelCase , low + middle , lowerCamelCase , lowerCamelCase )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if length > 1:
__lowercase = int(length / 2 )
bitonic_sort(lowerCamelCase , lowerCamelCase , lowerCamelCase , 1 )
bitonic_sort(lowerCamelCase , low + middle , lowerCamelCase , 0 )
bitonic_merge(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : Tuple = input("""Enter numbers separated by a comma:\n""").strip()
__UpperCamelCase : int = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
| 80 |
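# --- Illustrative sketch (added; not part of the dataset row above) ---
# Bitonic sort assumes the input length is a power of two: the recursion sorts the
# two halves in opposite directions, producing a bitonic sequence that the merge
# step untangles. A compact standalone version of the same algorithm:
def bitonic_sort_list(values: list) -> list:
    assert (len(values) & (len(values) - 1)) == 0, "length must be a power of two"
    def merge(low: int, length: int, ascending: bool) -> None:
        if length > 1:
            mid = length // 2
            for i in range(low, low + mid):
                if (values[i] > values[i + mid]) == ascending:
                    values[i], values[i + mid] = values[i + mid], values[i]
            merge(low, mid, ascending)
            merge(low + mid, mid, ascending)
    def sort(low: int, length: int, ascending: bool) -> None:
        if length > 1:
            mid = length // 2
            sort(low, mid, True)
            sort(low + mid, mid, False)
            merge(low, length, ascending)
    sort(0, len(values), True)
    return values
assert bitonic_sort_list([3, 7, 4, 8, 6, 2, 1, 5]) == [1, 2, 3, 4, 5, 6, 7, 8]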
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> list:
_lowercase = [0] * len(snake_case__ )
for i in range(1 , len(snake_case__ ) ):
# use last results for better performance - dynamic programming
_lowercase = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_lowercase = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_lowercase = j
return prefix_result
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> int:
return max(prefix_function(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 67 | 0 |
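# --- Illustrative sketch (added; not part of the dataset row above) ---
# The prefix (failure) function above is the core of Knuth-Morris-Pratt search:
# failure[i] is the length of the longest proper border of pattern[: i + 1], which
# tells the matcher how far to fall back on a mismatch. A standalone matcher:
def kmp_search(text: str, pattern: str) -> list:
    if not pattern:
        return list(range(len(text) + 1))
    failure = [0] * len(pattern)
    j = 0
    for i in range(1, len(pattern)):
        while j > 0 and pattern[i] != pattern[j]:
            j = failure[j - 1]
        if pattern[i] == pattern[j]:
            j += 1
        failure[i] = j
    matches, j = [], 0
    for i, char in enumerate(text):
        while j > 0 and char != pattern[j]:
            j = failure[j - 1]
        if char == pattern[j]:
            j += 1
        if j == len(pattern):
            matches.append(i - j + 1)
            j = failure[j - 1]
    return matches
assert kmp_search("abcabcabd", "abcabd") == [3]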
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : Optional[int] = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = "big_bird"
def __init__( self : Union[str, Any] , lowerCamelCase : Union[str, Any]=50358 , lowerCamelCase : List[str]=768 , lowerCamelCase : Any=12 , lowerCamelCase : List[str]=12 , lowerCamelCase : Any=3072 , lowerCamelCase : Optional[Any]="gelu_new" , lowerCamelCase : Optional[Any]=0.1 , lowerCamelCase : int=0.1 , lowerCamelCase : str=4096 , lowerCamelCase : Optional[Any]=2 , lowerCamelCase : Dict=0.02 , lowerCamelCase : int=1E-12 , lowerCamelCase : Dict=True , lowerCamelCase : Union[str, Any]=0 , lowerCamelCase : List[str]=1 , lowerCamelCase : str=2 , lowerCamelCase : Tuple=66 , lowerCamelCase : Dict="block_sparse" , lowerCamelCase : List[str]=True , lowerCamelCase : Dict=False , lowerCamelCase : str=64 , lowerCamelCase : List[Any]=3 , lowerCamelCase : List[Any]=None , **lowerCamelCase : Union[str, Any] , ) -> str:
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , sep_token_id=lowerCamelCase , **lowerCamelCase , )
__snake_case : Optional[int] = vocab_size
__snake_case : Any = max_position_embeddings
__snake_case : int = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : int = num_attention_heads
__snake_case : Dict = intermediate_size
__snake_case : str = hidden_act
__snake_case : List[str] = hidden_dropout_prob
__snake_case : str = attention_probs_dropout_prob
__snake_case : Optional[int] = initializer_range
__snake_case : Any = type_vocab_size
__snake_case : List[Any] = layer_norm_eps
__snake_case : Optional[Any] = use_cache
__snake_case : List[Any] = rescale_embeddings
__snake_case : int = attention_type
__snake_case : int = use_bias
__snake_case : List[str] = block_size
__snake_case : Tuple = num_random_blocks
__snake_case : Union[str, Any] = classifier_dropout
class a (_lowerCAmelCase ):
"""simple docstring"""
@property
def __snake_case ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__snake_case : str = {0: "batch", 1: "choice", 2: "sequence"}
else:
__snake_case : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 81 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Union[str, Any]:
_lowercase = len(snake_case__ )
_lowercase = sum(snake_case__ )
_lowercase = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_lowercase = True
for i in range(1 , s + 1 ):
_lowercase = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
_lowercase = dp[i][j - 1]
if arr[i - 1] <= j:
_lowercase = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
_lowercase = s - 2 * j
break
return diff | 67 | 0 |
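# --- Illustrative sketch (added; not part of the dataset row above) ---
# The routine above solves the classic partition problem: split the array into two
# subsets so the difference of their sums is minimal. A compact reference using a
# set of reachable subset sums gives the same answer:
def min_partition_difference(arr: list) -> int:
    total = sum(arr)
    reachable = {0}
    for value in arr:
        reachable |= {value + partial for partial in reachable}
    return min(abs(total - 2 * subset_sum) for subset_sum in reachable)
assert min_partition_difference([1, 6, 11, 5]) == 1  # {1, 6, 5} vs {11}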
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase = {
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 82 |
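# --- Illustrative sketch (added; not part of the dataset row above) ---
# The _import_structure/_LazyModule pattern above defers importing heavy submodules
# until an attribute is first accessed. Module-level __getattr__ (PEP 562) achieves
# the same effect; the attribute-to-submodule map below is a hypothetical example.
import importlib
_LAZY_ATTRS = {"Blip2Config": ".configuration_blip_2", "Blip2Model": ".modeling_blip_2"}
def __getattr__(name):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")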
from manim import *
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
_lowercase = Rectangle(height=0.5 ,width=0.5 )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
_lowercase = Rectangle(height=0.25 ,width=0.25 )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('CPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(4 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('GPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
gpu.move_to([-1, -1, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('Model' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
model.move_to([3, -1.0, 0] )
self.add(__A )
_lowercase = []
_lowercase = []
for i, rect in enumerate(__A ):
_lowercase = fill.copy().set_fill(__A ,opacity=0.8 )
target.move_to(__A )
model_arr.append(__A )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__A ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__A )
self.add(*__A ,*__A )
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('Disk' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
disk.move_to([-4, -1.25, 0] )
self.add(__A ,__A )
_lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowercase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__A ,__A )
_lowercase = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(__A ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__A )
_lowercase = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ) )
_lowercase = Square(0.3 )
input.set_fill(__A ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__A ,buff=0.5 )
self.play(Write(__A ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__A ,buff=0.02 )
self.play(MoveToTarget(__A ) )
self.play(FadeOut(__A ) )
_lowercase = Arrow(start=__A ,end=__A ,color=__A ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__A ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_lowercase = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) )
_lowercase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__A ) ,Circumscribe(model_arr[0] ,color=__A ,**__A ) ,Circumscribe(model_cpu_arr[0] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
_lowercase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__A ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_lowercase = AnimationGroup(
FadeOut(__A ,run_time=0.5 ) ,MoveToTarget(__A ,run_time=0.5 ) ,FadeIn(__A ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__A )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_lowercase = 0.7
self.play(
Circumscribe(model_arr[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,Circumscribe(model_arr[i + 1] ,color=__A ,**__A ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__A ,**__A ) ,Circumscribe(cpu_left_col_base[-1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
_lowercase = a_c
_lowercase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__A ) ,FadeOut(__A ,run_time=0.5 ) ,)
_lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) ,MoveToTarget(__A ) )
self.wait() | 67 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowerCAmelCase__ = '''hf-internal-testing/tiny-random-bert'''
lowerCAmelCase__ = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowerCAmelCase__ = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = cached_file(__lowerCAmelCase , __lowerCAmelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(__lowerCAmelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) )
with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
_lowerCamelCase : Optional[int] = f.read()
self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
self.assertTrue(os.path.isfile(__lowerCAmelCase ) )
# File is cached at the same place the second time.
_lowerCamelCase : Tuple = cached_file(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
# Using a specific revision to test the full commit hash.
_lowerCamelCase : Dict = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''9b8c223''' )
self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
_lowerCamelCase : Optional[int] = cached_file('''tiny-random-bert''' , __lowerCAmelCase )
with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
_lowerCamelCase : str = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''aaaa''' )
with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
_lowerCamelCase : int = cached_file(__lowerCAmelCase , '''conf''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
_lowerCamelCase : Dict = cached_file(__lowerCAmelCase , '''conf''' )
with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
_lowerCamelCase : List[Any] = f.read()
self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , '''.no_exist''' , __lowerCAmelCase , '''conf''' ) ) )
_lowerCamelCase : str = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
self.assertIsNone(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = cached_file(__lowerCAmelCase , '''conf''' , local_files_only=__lowerCAmelCase , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
self.assertIsNone(__lowerCAmelCase )
_lowerCamelCase : Any = mock.Mock()
_lowerCamelCase : Optional[Any] = 5_0_0
_lowerCamelCase : Dict = {}
_lowerCamelCase : List[Any] = HTTPError
_lowerCamelCase : int = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=__lowerCAmelCase ) as mock_head:
_lowerCamelCase : Union[str, Any] = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=__lowerCAmelCase )
self.assertIsNone(__lowerCAmelCase )
            # This checks that we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , __lowerCAmelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase , revision='''ahaha''' )
_lowerCamelCase : Dict = get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
_lowerCamelCase : Dict = json.loads(open(__lowerCAmelCase , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 7_6_8 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Any = Path(__lowerCAmelCase ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(__lowerCAmelCase , '''a.txt''' ) , str(__lowerCAmelCase ) )
self.assertIsNone(get_file_from_repo(__lowerCAmelCase , '''b.txt''' ) )
| 83 |
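# --- Illustrative usage (added; not part of the dataset row above) ---
# The helpers exercised by the tests above resolve repository files into the local
# Hugging Face cache. Network access is needed on the first call; the repo id is
# the one used in the tests.
from transformers.utils import cached_file, has_file
config_path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
print(config_path)  # path inside TRANSFORMERS_CACHE
print(has_file("hf-internal-testing/tiny-random-bert", "pytorch_model.bin"))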
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A_ :
"""simple docstring"""
def __init__( self : Dict ,__A : Any ,__A : Tuple=None ,__A : Optional[int]=None ,__A : Optional[int]=None ,__A : int="resnet50" ,__A : int=3 ,__A : List[Any]=32 ,__A : Tuple=3 ,__A : List[Any]=True ,__A : Tuple=True ,) -> Any:
_lowercase = parent
_lowercase = out_indices if out_indices is not None else [4]
_lowercase = stage_names
_lowercase = out_features
_lowercase = backbone
_lowercase = batch_size
_lowercase = image_size
_lowercase = num_channels
_lowercase = use_pretrained_backbone
_lowercase = is_training
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase = self.get_config()
return config, pixel_values
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)
def __UpperCAmelCase ( self : Any ,__A : Any ,__A : Dict ) -> Union[str, Any]:
_lowercase = TimmBackbone(config=__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowercase = model(__A )
self.parent.assertEqual(
            result.feature_maps[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,)
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
_lowercase = self.prepare_config_and_inputs()
_lowercase , _lowercase = config_and_inputs
_lowercase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Any = False
def __UpperCAmelCase ( self : str ) -> Optional[int]:
_lowercase = TimmBackboneModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A )
def __UpperCAmelCase ( self : int ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
_lowercase = 'resnet18'
_lowercase = 'microsoft/resnet-18'
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A )
_lowercase = AutoBackbone.from_pretrained(__A )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A ,out_indices=[1, 2, 3] )
_lowercase = AutoBackbone.from_pretrained(__A ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : int ) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __UpperCAmelCase ( self : Any ) -> Any:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
pass
def __UpperCAmelCase ( self : Dict ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
_lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase = [*signature.parameters.keys()]
_lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__A )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = True
_lowercase = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowercase = self.all_model_classes[0]
_lowercase = model_class(__A )
model.to(__A )
_lowercase = self._prepare_for_class(__A ,__A )
_lowercase = model(**__A )
_lowercase = outputs[0][-1]
# Encoder-/Decoder-only models
_lowercase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowercase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_lowercase = copy.deepcopy(__A )
_lowercase = None
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
_lowercase = copy.deepcopy(__A )
_lowercase = False
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A ) | 67 | 0 |
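# --- Illustrative usage (added; not part of the dataset row above) ---
# What the checkpoint-loading tests above assert, as a plain script (assumes torch,
# timm and network access; checkpoint names are those used in the tests):
from transformers import AutoBackbone
timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True)
print(timm_backbone.out_indices)  # (-1,): timm defaults to the last stage
hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18")
print(hf_backbone.out_indices)  # [len(stage_names) - 1], kept for compatibility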
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False ):
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.' )
# array bounds provided by analysis
lowercase = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
lowercase = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(__SCREAMING_SNAKE_CASE , 1 ):
if n < _p:
# then we have our last prime to check
lowercase = primes[:idx]
break
lowercase , lowercase = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowercase = False
for r in range(__SCREAMING_SNAKE_CASE ):
lowercase = pow(__SCREAMING_SNAKE_CASE , d * 2**r , __SCREAMING_SNAKE_CASE )
            # see the article's analysis for an explanation of m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowercase = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def UpperCAmelCase_ ( ):
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 84 |
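# --- Illustrative sketch (added; not part of the dataset row above) ---
# Deterministic Miller-Rabin, same idea as above: write n - 1 = d * 2**s with d odd,
# then test each witness. The first 12 primes (2..37) are known to be exact
# witnesses for every n below about 3.2e24, so certainly for all 64-bit integers.
def miller_rabin_64(n: int) -> bool:
    if n < 2:
        return False
    witnesses = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37)
    for p in witnesses:
        if n % p == 0:
            return n == p
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    for a in witnesses:
        x = pow(a, d, n)
        if x in (1, n - 1):
            continue
        for _ in range(s - 1):
            x = x * x % n
            if x == n - 1:
                break
        else:
            return False
    return True
assert miller_rabin_64(1_000_000_007) and not miller_rabin_64(1_000_000_006)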
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 67 | 0 |
from __future__ import annotations
def _a ( lowercase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [True] * limit
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : int = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = i * 2
while index < limit:
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : Tuple = index + i
SCREAMING_SNAKE_CASE__ : List[str] = [2]
for i in range(3 , lowercase__ , 2 ):
if is_prime[i]:
primes.append(lowercase__ )
return primes
def _a ( lowercase__ : int = 1_00_00_00 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = prime_sieve(lowercase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
for i in range(len(lowercase__ ) ):
for j in range(i + length , len(lowercase__ ) ):
SCREAMING_SNAKE_CASE__ : int = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
SCREAMING_SNAKE_CASE__ : Any = j - i
SCREAMING_SNAKE_CASE__ : int = sol
return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 |
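# --- Illustrative check (added; not part of the dataset row above) ---
# Small-scale version of the search above: below 100, the longest run of
# consecutive primes whose sum is itself prime is 2 + 3 + 5 + 7 + 11 + 13 = 41
# (six terms).
def sieve(limit: int) -> list:
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(limit**0.5) + 1):
        if is_prime[i]:
            for j in range(i * i, limit, i):
                is_prime[j] = False
    return [i for i, flag in enumerate(is_prime) if flag]
primes = sieve(100)
assert sum(primes[:6]) == 41 and 41 in primes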
snake_case = {str(digit): digit**5 for digit in range(1_0)}
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> int:
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(snake_case__ ) )
def SCREAMING_SNAKE_CASE__ ( ) -> int:
return sum(
number
for number in range(1000 , 100_0000 )
if number == digits_fifth_powers_sum(snake_case__ ) )
if __name__ == "__main__":
print(solution()) | 67 | 0 |
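# --- Illustrative check (added; not part of the dataset row above) ---
# The search above finds every number equal to the sum of the fifth powers of its
# digits: 4150, 4151, 54748, 92727, 93084 and 194979, which total 443839
# (Project Euler problem 30). Verified directly:
def digit_fifth_power_sum(n: int) -> int:
    return sum(int(digit) ** 5 for digit in str(n))
hits = [n for n in range(1000, 1_000_000) if n == digit_fifth_power_sum(n)]
assert hits == [4150, 4151, 54748, 92727, 93084, 194979]
assert sum(hits) == 443839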
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
__a :Tuple = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
__a :List[Any] = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
__a :List[str] = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
__a :List[str] = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
__a :str = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : Dict ):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any]=[1, 10, 100] , UpperCAmelCase : Tuple=4 , UpperCAmelCase : Union[str, Any]=3.0 ):
if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("This metric is currently not supported on Windows." )
with ThreadPoolExecutor(max_workers=UpperCAmelCase ) as executor:
A_ = []
A_ = Counter()
A_ = 0
A_ = defaultdict(UpperCAmelCase )
for task_id, (candidates, test_case) in enumerate(zip(UpperCAmelCase , UpperCAmelCase ) ):
for candidate in candidates:
A_ = candidate + "\n" + test_case
A_ = (test_program, timeout, task_id, completion_id[task_id])
A_ = executor.submit(UpperCAmelCase , *UpperCAmelCase )
futures.append(UpperCAmelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(UpperCAmelCase ):
A_ = future.result()
results[result["task_id"]].append((result["completion_id"], result) )
A_ , A_ = [], []
for result in results.values():
result.sort()
A_ = [r[1]["passed"] for r in result]
total.append(len(UpperCAmelCase ) )
correct.append(sum(UpperCAmelCase ) )
A_ = np.array(UpperCAmelCase )
A_ = np.array(UpperCAmelCase )
A_ = k
A_ = {f'''pass@{k}''': estimate_pass_at_k(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Dict ,__UpperCamelCase : Any ):
"""simple docstring"""
def estimator(__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 ,n + 1 ) )
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = itertools.repeat(__UpperCamelCase ,len(__UpperCamelCase ) )
else:
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = iter(__UpperCamelCase )
return np.array([estimator(int(__UpperCamelCase ) ,int(__UpperCamelCase ) ,__UpperCamelCase ) for n, c in zip(__UpperCamelCase ,__UpperCamelCase )] ) | 86 |
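# --- Illustrative sketch (added; not part of the dataset row above) ---
# The estimator above is the unbiased pass@k of Chen et al. (2021): with n samples
# of which c pass, pass@k = 1 - C(n - c, k) / C(n, k), computed in a numerically
# stable product form. Standalone:
import numpy as np
def pass_at_k(n: int, c: int, k: int) -> float:
    if n - c < k:
        return 1.0
    return 1.0 - float(np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))
assert pass_at_k(10, 10, 1) == 1.0
assert abs(pass_at_k(10, 5, 1) - 0.5) < 1e-9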
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> list[int]:
_lowercase = str(snake_case__ )
_lowercase = [n]
for i in range(1 , len(snake_case__ ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> bool:
if len(str(snake_case__ ) ) > 3:
if not is_prime(int(str(snake_case__ )[-3:] ) ) or not is_prime(int(str(snake_case__ )[:3] ) ):
return False
return True
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int = 11 ) -> list[int]:
_lowercase = []
_lowercase = 13
while len(snake_case__ ) != count:
if validate(snake_case__ ):
_lowercase = list_truncated_nums(snake_case__ )
if all(is_prime(snake_case__ ) for i in list_nums ):
list_truncated_primes.append(snake_case__ )
num += 2
return list_truncated_primes
def SCREAMING_SNAKE_CASE__ ( ) -> int:
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(1_1)) = }""") | 67 | 0 |
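# --- Illustrative check (added; not part of the dataset row above) ---
# There are exactly eleven primes that stay prime under every left and right
# truncation (2, 3, 5 and 7 excluded): 23, 37, 53, 73, 313, 317, 373, 797, 3137,
# 3797 and 739397, summing to 748317. Brute-force verification (a few seconds):
def is_prime_simple(n: int) -> bool:
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True
def is_truncatable(n: int) -> bool:
    digits = str(n)
    return all(
        is_prime_simple(int(digits[i:])) and is_prime_simple(int(digits[: len(digits) - i]))
        for i in range(len(digits))
    )
found, candidate = [], 11
while len(found) < 11:
    if is_truncatable(candidate):
        found.append(candidate)
    candidate += 2
assert sum(found) == 748317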
import re
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
"""simple docstring"""
if len(re.findall('''[ATCG]''' , lowercase_ ) ) != len(lowercase_ ):
raise ValueError('''Invalid Strand''' )
return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 |
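# --- Illustrative usage (added; not part of the dataset row above) ---
# The snippet above returns the complement strand only; reversing the result gives
# the biological reverse complement:
complement = str.maketrans("ATCG", "TAGC")
assert "GTAT".translate(complement) == "CATA"        # complement
assert "GTAT".translate(complement)[::-1] == "ATAC"  # reverse complement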
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class A_ :
"""simple docstring"""
def __init__( self : Optional[Any] ,__A : Tuple ,__A : Any=99 ,__A : Any=13 ,__A : Dict=7 ,__A : List[Any]=9 ,__A : Dict=True ,__A : Any=True ,__A : Tuple=False ,__A : str=32 ,__A : int=5 ,__A : List[str]=4 ,__A : Optional[Any]=37 ,__A : int=8 ,__A : Any=0.1 ,__A : Dict=0.002 ,__A : Union[str, Any]=1 ,__A : Optional[Any]=0 ,__A : int=0 ,__A : Tuple=None ,__A : str=None ,) -> List[Any]:
_lowercase = parent
_lowercase = batch_size
_lowercase = encoder_seq_length
_lowercase = decoder_seq_length
# For common tests
_lowercase = self.decoder_seq_length
_lowercase = is_training
_lowercase = use_attention_mask
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = d_ff
_lowercase = relative_attention_num_buckets
_lowercase = dropout_rate
_lowercase = initializer_factor
_lowercase = eos_token_id
_lowercase = pad_token_id
_lowercase = decoder_start_token_id
_lowercase = None
_lowercase = decoder_layers
def __UpperCAmelCase ( self : Dict ) -> Dict:
return TaConfig.from_pretrained('google/umt5-base' )
def __UpperCAmelCase ( self : Optional[int] ,__A : Optional[int] ,__A : int ,__A : str ,__A : List[str]=None ,__A : List[str]=None ,__A : Any=None ,__A : List[Any]=None ,__A : str=None ,) -> Tuple:
if attention_mask is None:
_lowercase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_lowercase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_lowercase = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=__A )
if decoder_head_mask is None:
_lowercase = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=__A )
if cross_attn_head_mask is None:
_lowercase = torch.ones(
config.num_decoder_layers ,config.num_attention_heads ,device=__A )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
_lowercase = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
_lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
_lowercase = input_ids.clamp(self.pad_token_id + 1 )
_lowercase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_lowercase = self.get_config()
_lowercase = config.num_attention_heads
_lowercase = self.prepare_inputs_dict(__A ,__A ,__A )
return config, input_dict
def __UpperCAmelCase ( self : Dict ) -> str:
_lowercase , _lowercase = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self : Dict ) -> Tuple:
return TaConfig(
vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __UpperCAmelCase ( self : Dict ) -> Any:
return TaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __UpperCAmelCase ( self : Union[str, Any] ,__A : List[str] ,__A : Dict ,__A : List[str] ,__A : List[Any] ,__A : Tuple ,__A : int ,) -> Tuple:
_lowercase = UMTaModel(config=__A )
model.to(__A )
model.eval()
_lowercase = model(
input_ids=__A ,decoder_input_ids=__A ,attention_mask=__A ,decoder_attention_mask=__A ,)
_lowercase = model(input_ids=__A ,decoder_input_ids=__A )
_lowercase = result.last_hidden_state
_lowercase = result.past_key_values
_lowercase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__A ) ,config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) ,4 )
def __UpperCAmelCase ( self : List[Any] ,__A : Tuple ,__A : int ,__A : Any ,__A : Tuple ,__A : Any ,__A : Optional[int] ,) -> List[str]:
_lowercase = UMTaModel(config=__A ).get_decoder().to(__A ).eval()
# first forward pass
_lowercase = model(__A ,use_cache=__A )
_lowercase = model(__A )
_lowercase = model(__A ,use_cache=__A )
self.parent.assertTrue(len(__A ) == len(__A ) )
self.parent.assertTrue(len(__A ) == len(__A ) + 1 )
_lowercase , _lowercase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowercase = ids_tensor((self.batch_size, 1) ,config.vocab_size )
# append to next input_ids and
_lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
_lowercase = model(__A )['last_hidden_state']
_lowercase = model(__A ,past_key_values=__A )['last_hidden_state']
# select random slice
_lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_lowercase = output_from_no_past[:, -1, random_slice_idx].detach()
_lowercase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A ,__A ,atol=1e-3 ) )
def __UpperCAmelCase ( self : Any ,__A : List[str] ,__A : List[str] ,) -> int:
_lowercase = UMTaModel(config=__A ).to(__A ).half().eval()
_lowercase = model(**__A )['last_hidden_state']
self.parent.assertFalse(torch.isnan(__A ).any().item() )
@require_torch
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE_ : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : str = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE_ : Dict = [0.8, 0.9]
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
_lowercase = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def __UpperCAmelCase ( self : int ) -> str:
_lowercase = self.model_tester.prepare_config_and_inputs()
_lowercase = UMTaModel(config_and_inputs[0] ).to(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__A ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F"""{tmpdirname}/t5_test.onnx""" ,export_params=__A ,opset_version=9 ,input_names=['input_ids', 'decoder_input_ids'] ,)
@unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
def __UpperCAmelCase ( self : List[Any] ) -> str:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__A )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
_lowercase = self.model_tester.prepare_config_and_inputs()
_lowercase = config_and_inputs[0]
_lowercase = UMTaForConditionalGeneration(__A ).eval()
model.to(__A )
_lowercase = {
'head_mask': torch.zeros(config.num_layers ,config.num_heads ,device=__A ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
}
for attn_name, (name, mask) in zip(__A ,head_masking.items() ):
_lowercase = {name: mask}
# Explicitly pass decoder_head_mask as it is required by the T5 model when head_mask is specified
if name == "head_mask":
_lowercase = torch.ones(
config.num_decoder_layers ,config.num_heads ,device=__A )
_lowercase = model.generate(
config_and_inputs[1]['input_ids'] ,num_beams=1 ,max_length=3 ,output_attentions=__A ,return_dict_in_generate=__A ,**__A ,)
# We check the state of decoder_attentions and cross_attentions just from the last step
_lowercase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def __UpperCAmelCase ( self : str ) -> List[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def __UpperCAmelCase ( self : int ) -> List[str]:
_lowercase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=__A ).to(__A )
_lowercase = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=__A ,legacy=__A )
_lowercase = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
_lowercase = tokenizer(__A ,return_tensors='pt' ,padding=__A ).input_ids
# fmt: off
_lowercase = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333, 6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__A ,__A )
_lowercase = model.generate(input_ids.to(__A ) )
_lowercase = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
_lowercase = tokenizer.batch_decode(__A )
self.assertEqual(__A ,__A ) | 67 | 0 |
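# The UMT5 tests above rely on an `ids_tensor` helper that is not shown in this snippet.
# A minimal sketch of such a helper, assuming it only needs to draw random token ids
# below `vocab_size` (the name and signature are inferred from the call sites above):
import torch

def ids_tensor(shape, vocab_size):
    # Uniformly sample integer token ids in [0, vocab_size) with the requested shape.
    return torch.randint(low=0, high=vocab_size, size=tuple(shape), dtype=torch.long)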
"""simple docstring"""
def bead_sort ( sequence : list ):
"""simple docstring"""
if any(not isinstance(x , int ) or x < 0 for x in sequence ):
raise TypeError("""Sequence must be list of non-negative integers""" )
for _ in range(len(sequence ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 88 |
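# A minimal alternative sketch of the same algorithm in its "gravity" formulation,
# assuming non-negative integers: treat each value as a row of beads and count how
# many beads settle in each column once they fall.
def bead_sort_gravity(sequence: list) -> list:
    if not sequence:
        return []
    max_value = max(sequence)
    # beads_per_column[j]: number of values contributing a bead to column j
    beads_per_column = [sum(1 for value in sequence if value > j) for j in range(max_value)]
    # After the beads fall, row i holds one bead per column taller than i,
    # which reads out the values in descending order.
    descending = [sum(1 for count in beads_per_column if count > i) for i in range(len(sequence))]
    return descending[::-1]

assert bead_sort_gravity([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]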
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='utf-8' ,check=__A ,)
assert hasattr(self ,'env' )
def __UpperCAmelCase ( self : str ,__A : Tuple ) -> int:
# configuration for running training on smdistributed Model Parallel
_lowercase = {
'enabled': True,
'processes_per_host': 8,
}
_lowercase = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
_lowercase = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
_lowercase = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,instance_count=__A ,instance_type=self.instance_type ,debugger_hook_config=__A ,hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 500,
} ,metric_definitions=self.env.metric_definitions ,distribution=__A ,py_version='py36' ,)
def __UpperCAmelCase ( self : List[Any] ,__A : Any ) -> Optional[Any]:
TrainingJobAnalytics(__A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __UpperCAmelCase ( self : Tuple ,__A : Union[str, Any] ) -> Optional[Any]:
# create estimator
_lowercase = self.create_estimator(__A )
# run training
estimator.fit()
# result dataframe
_lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
_lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowercase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" ,'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,__A ) | 67 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _lowerCamelCase( _a ):
lowercase_ : List[str] = """longformer"""
def __init__( self, lowerCamelCase = 5_12, lowerCamelCase = 2, lowerCamelCase = 1, lowerCamelCase = 0, lowerCamelCase = 2, lowerCamelCase = 3_05_22, lowerCamelCase = 7_68, lowerCamelCase = 12, lowerCamelCase = 12, lowerCamelCase = 30_72, lowerCamelCase = "gelu", lowerCamelCase = 0.1, lowerCamelCase = 0.1, lowerCamelCase = 5_12, lowerCamelCase = 2, lowerCamelCase = 0.0_2, lowerCamelCase = 1E-12, lowerCamelCase = False, **lowerCamelCase, ) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCamelCase, **lowerCamelCase)
_lowercase : List[Any] = attention_window
_lowercase : Any = sep_token_id
_lowercase : Any = bos_token_id
_lowercase : int = eos_token_id
_lowercase : str = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Any = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : int = intermediate_size
_lowercase : str = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : str = max_position_embeddings
_lowercase : Optional[int] = type_vocab_size
_lowercase : Optional[int] = initializer_range
_lowercase : Any = layer_norm_eps
_lowercase : Tuple = onnx_export
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase = "default", lowerCamelCase = None) -> Dict:
"""simple docstring"""
super().__init__(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : Any = True
@property
def UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_lowercase : List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
])
@property
def UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
_lowercase : Union[str, Any] = super().outputs
if self.task == "default":
_lowercase : int = {0: 'batch'}
return outputs
@property
def UpperCamelCase ( self) -> float:
"""simple docstring"""
return 1E-4
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return max(super().default_onnx_opset, 14)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = -1, lowerCamelCase = -1, lowerCamelCase = False, lowerCamelCase = None, ) -> Mapping[str, Any]:
"""simple docstring"""
_lowercase : Optional[int] = super().generate_dummy_inputs(
preprocessor=lowerCamelCase, batch_size=lowerCamelCase, seq_length=lowerCamelCase, is_pair=lowerCamelCase, framework=lowerCamelCase)
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_lowercase : Dict = torch.zeros_like(inputs['input_ids'])
# make every second token global
_lowercase : List[str] = 1
return inputs
| 89 |
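# A minimal sketch of the global-attention-mask pattern built in generate_dummy_inputs
# above: start from an all-local (zero) mask and mark every second token as global.
import torch

input_ids = torch.ones(2, 8, dtype=torch.long)        # stand-in for dummy token ids
global_attention_mask = torch.zeros_like(input_ids)   # all tokens attend locally
global_attention_mask[:, ::2] = 1                     # make every second token global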
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE_ : int = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] ,__A : List[Any]=5_0265 ,__A : str=512 ,__A : Optional[int]=8 ,__A : Any=2048 ,__A : Tuple=16 ,__A : str=8 ,__A : int=2048 ,__A : List[str]=16 ,__A : Optional[int]=0.0 ,__A : Any=0.0 ,__A : int=True ,__A : List[Any]=True ,__A : Tuple="gelu" ,__A : Any=512 ,__A : Dict=0.1 ,__A : Tuple=0.0 ,__A : int=0.0 ,__A : int=0.02 ,__A : Dict=1 ,__A : str=False ,__A : Dict=0 ,__A : Union[str, Any]=1 ,__A : Optional[int]=2 ,__A : List[str]=2 ,**__A : Tuple ,) -> Tuple:
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = d_model
_lowercase = encoder_ffn_dim
_lowercase = encoder_layers
_lowercase = encoder_attention_heads
_lowercase = decoder_ffn_dim
_lowercase = decoder_layers
_lowercase = decoder_attention_heads
_lowercase = dropout
_lowercase = attention_dropout
_lowercase = activation_dropout
_lowercase = activation_function
_lowercase = init_std
_lowercase = encoder_layerdrop
_lowercase = decoder_layerdrop
_lowercase = use_cache
_lowercase = encoder_layers
_lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,is_encoder_decoder=__A ,decoder_start_token_id=__A ,forced_eos_token_id=__A ,**__A ,)
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase = {0: 'batch'}
_lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__A ,direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def __UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super().outputs
else:
_lowercase = super(__A ,self ).outputs
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __UpperCAmelCase ( self : Optional[int] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
# Generate decoder inputs
_lowercase = seq_length if not self.use_past else 1
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
_lowercase = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_lowercase = dict(**__A ,**__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
_lowercase = common_inputs['decoder_input_ids'].shape[1]
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = decoder_seq_length + 3
_lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowercase = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__A ,__A )] ,dim=1 )
_lowercase = []
# If the numbers of encoder and decoder layers are both present in the model configuration, both are considered
_lowercase , _lowercase = self.num_layers
_lowercase = min(__A ,__A )
_lowercase = max(__A ,__A ) - min_num_layers
_lowercase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__A ):
common_inputs["past_key_values"].append(
(
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
) )
# TODO: test this.
_lowercase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__A ,__A ):
common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) )
return common_inputs
def __UpperCAmelCase ( self : List[Any] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowercase = seqlen + 2
_lowercase , _lowercase = self.num_layers
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = common_inputs['attention_mask'].dtype
_lowercase = torch.cat(
[common_inputs['attention_mask'], torch.ones(__A ,__A ,dtype=__A )] ,dim=1 )
_lowercase = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A )
]
return common_inputs
def __UpperCAmelCase ( self : Any ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowercase = tokenizer.num_special_tokens_to_add(__A )
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__A )
# Generate dummy inputs according to the computed batch and sequence
_lowercase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowercase = dict(tokenizer(__A ,return_tensors=__A ) )
return common_inputs
def __UpperCAmelCase ( self : Dict ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
elif self.task == "causal-lm":
_lowercase = self._generate_dummy_inputs_for_causal_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
else:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
return common_inputs
def __UpperCAmelCase ( self : List[str] ,__A : Dict ,__A : Any ,__A : List[Any] ,__A : Tuple ) -> Union[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super()._flatten_past_key_values_(__A ,__A ,__A ,__A )
else:
_lowercase = super(__A ,self )._flatten_past_key_values_(
__A ,__A ,__A ,__A ) | 67 | 0 |
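# A small sketch of the past_key_values shape arithmetic used above, with illustrative
# numbers rather than values from a real checkpoint: each cached key/value tensor has
# shape (batch, num_heads, past_sequence_length, hidden_size // num_heads).
batch, num_heads, hidden_size = 2, 16, 512
decoder_seq_length = 5
decoder_past_length = decoder_seq_length + 3  # mirrors the "+ 3" padding above
decoder_shape = (batch, num_heads, decoder_past_length, hidden_size // num_heads)
assert decoder_shape == (2, 16, 8, 32)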
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class a__ :
'''simple docstring'''
lowercase__ : List[str]
lowercase__ : Optional[str] = None
# Automatically constructed
lowercase__ : ClassVar[str] = "dict"
lowercase__ : ClassVar[Any] = None
lowercase__ : str = field(default="Translation" , init=a__ , repr=a__ )
def __call__( self ) -> Any:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def __SCREAMING_SNAKE_CASE ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class a__ :
'''simple docstring'''
lowercase__ : Optional[List] = None
lowercase__ : Optional[int] = None
lowercase__ : Optional[str] = None
# Automatically constructed
lowercase__ : ClassVar[str] = "dict"
lowercase__ : ClassVar[Any] = None
lowercase__ : str = field(default="TranslationVariableLanguages" , init=a__ , repr=a__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = sorted(set(self.languages ) ) if self.languages else None
lowerCAmelCase__ = len(self.languages ) if self.languages else None
def __call__( self ) -> List[str]:
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
lowerCAmelCase__ = set(self.languages )
if self.languages and set(lowerCamelCase_ ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(lowerCamelCase_ ) - lang_set ) )}) are not in valid set ({", ".join(lowerCamelCase_ )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCAmelCase__ = []
for lang, text in translation_dict.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCAmelCase__ , lowerCAmelCase__ = zip(*sorted(lowerCamelCase_ ) )
return {"language": languages, "translation": translations}
def __SCREAMING_SNAKE_CASE ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
} | 90 |
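# A minimal sketch of the flattening step performed above: a dict mapping language codes
# to one or more translations becomes parallel, language-sorted tuples.
translation_dict = {"fr": "bonjour", "de": ["hallo", "guten tag"]}
translation_tuples = []
for lang, text in translation_dict.items():
    if isinstance(text, str):
        translation_tuples.append((lang, text))
    else:
        translation_tuples.extend([(lang, el) for el in text])
languages, translations = zip(*sorted(translation_tuples))
assert languages == ("de", "de", "fr")
assert translations == ("guten tag", "hallo", "bonjour")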
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : Any ) -> str:
torch.manual_seed(0 )
_lowercase = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
_lowercase = self.dummy_uncond_unet
_lowercase = KarrasVeScheduler()
_lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ).images
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ,return_dict=__A )[0]
_lowercase = image[0, -3:, -3:, -1]
_lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
_lowercase = 'google/ncsnpp-celebahq-256'
_lowercase = UNetaDModel.from_pretrained(__A )
_lowercase = KarrasVeScheduler()
_lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=20 ,generator=__A ,output_type='numpy' ).images
_lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowercase = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 67 | 0 |
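# A minimal numpy sketch of the slice-based regression check used in the tests above:
# only a small corner patch of the last channel is compared against stored reference
# values, which keeps the expected data tiny while still catching numerical drift.
import numpy as np

image = np.zeros((1, 32, 32, 3), dtype=np.float32)  # stand-in for a generated image
image_slice = image[0, -3:, -3:, -1]                # bottom-right 3x3 patch, last channel
expected_slice = np.zeros(9, dtype=np.float32)      # reference values from a known-good run
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2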
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ,data : Any ) -> None:
self.data = data
self.next = None
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : int ) -> None:
self.head = None
self.tail = None
def __iter__( self : List[Any] ) -> Iterator[Any]:
node = self.head
while self.head:
yield node.data
node = node.next
if node == self.head:
break
def __len__( self : List[str] ) -> int:
return sum(1 for _ in self )
def __repr__( self : Any ) -> str:
return "->".join(str(item ) for item in iter(self ) )
def insert_tail ( self : List[str] ,A_ : Any ) -> None:
self.insert_nth(len(self ) ,A_ )
def insert_head ( self : Union[str, Any] ,A_ : Any ) -> None:
self.insert_nth(0 ,A_ )
def insert_nth ( self : List[Any] ,index : int ,data : Any ) -> None:
if index < 0 or index > len(self ):
raise IndexError('list index out of range.' )
new_node = Node(data )
if self.head is None:
new_node.next = new_node # first node points itself
self.tail = self.head = new_node
elif index == 0: # insert at head
new_node.next = self.head
self.head = self.tail.next = new_node
else:
temp = self.head
for _ in range(index - 1 ):
temp = temp.next
new_node.next = temp.next
temp.next = new_node
if index == len(self ) - 1: # insert at tail
self.tail = new_node
def delete_front ( self : Tuple ) -> Any:
return self.delete_nth(0 )
def delete_tail ( self : Union[str, Any] ) -> Any:
return self.delete_nth(len(self ) - 1 )
def delete_nth ( self : Tuple ,index : int = 0 ) -> Any:
if not 0 <= index < len(self ):
raise IndexError('list index out of range.' )
delete_node = self.head
if self.head == self.tail: # just one node
self.head = self.tail = None
elif index == 0: # delete head node
self.tail.next = self.tail.next.next
self.head = self.head.next
else:
temp = self.head
for _ in range(index - 1 ):
temp = temp.next
delete_node = temp.next
temp.next = temp.next.next
if index == len(self ) - 1: # delete at tail
self.tail = temp
return delete_node.data
def is_empty ( self : Dict ) -> bool:
return len(self ) == 0
def test_circular_linked_list ( ):
circular_linked_list = CircularLinkedList()
assert len(circular_linked_list ) == 0
assert circular_linked_list.is_empty() is True
assert str(circular_linked_list ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(circular_linked_list ) == i
circular_linked_list.insert_nth(i , i + 1 )
assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(circular_linked_list ) == "->".join(str(i ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod() | 91 |
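# A short usage sketch of the circular list above (using the method names as fixed here):
# iteration stops after one full lap because __iter__ breaks once it wraps back to the head.
cll = CircularLinkedList()
for value in (1, 2, 3):
    cll.insert_tail(value)
assert repr(cll) == "1->2->3"
assert list(cll) == [1, 2, 3]
assert cll.delete_front() == 1
assert cll.delete_tail() == 3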
def naive_pattern_search( s :str , pattern :str ) -> list:
pat_len = len(pattern )
position = []
for i in range(len(s ) - pat_len + 1 ):
match_found = True
for j in range(pat_len ):
if s[i + j] != pattern[j]:
match_found = False
break
if match_found:
position.append(i )
return position
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC""")) | 67 | 0 |
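# A quick cross-check of the naive search above against Python's built-in str.find,
# advancing one position past each hit so overlapping matches are also reported.
def find_all_builtin(s: str, pattern: str) -> list:
    positions, start = [], 0
    while (idx := s.find(pattern, start)) != -1:
        positions.append(idx)
        start = idx + 1
    return positions

assert find_all_builtin("ABAAABCDBBABCDDEBCABC", "ABC") == [4, 10, 18]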
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
UpperCamelCase_ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Tuple ) -> List[Any]:
for attribute in key.split('''.''' ):
lowercase : List[str] =getattr(__magic_name__ , __magic_name__ )
if weight_type is not None:
lowercase : Union[str, Any] =getattr(__magic_name__ , __magic_name__ ).shape
else:
lowercase : Union[str, Any] =hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowercase : Tuple =value
elif weight_type == "weight_g":
lowercase : Union[str, Any] =value
elif weight_type == "weight_v":
lowercase : Any =value
elif weight_type == "bias":
lowercase : Dict =value
else:
lowercase : Union[str, Any] =value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
lowercase : Optional[int] =[]
lowercase : Union[str, Any] =fairseq_model.state_dict()
lowercase : Tuple =hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowercase : Union[str, Any] =None
for name, value in fairseq_dict.items():
lowercase : Any =False
if "conv_layers" in name:
load_conv_layer(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , )
lowercase : Tuple =True
elif name.split('''.''' )[0] == "proj":
lowercase : List[Any] =fairseq_model.proj
lowercase : Union[str, Any] =True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowercase : Tuple =True
if "*" in mapped_key:
lowercase : Any =name.split(__magic_name__ )[0].split('''.''' )[-2]
lowercase : Dict =mapped_key.replace('''*''' , __magic_name__ )
if "weight_g" in name:
lowercase : Optional[int] ='''weight_g'''
elif "weight_v" in name:
lowercase : Dict ='''weight_v'''
elif "bias" in name:
lowercase : Optional[Any] ='''bias'''
elif "weight" in name:
lowercase : Any ='''weight'''
else:
lowercase : Dict =None
set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
continue
if not is_used:
unused_weights.append(__magic_name__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
return proj_weight
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Any ) -> str:
lowercase : str =full_name.split('''conv_layers.''' )[-1]
lowercase : Optional[Any] =name.split('''.''' )
lowercase : List[Any] =int(items[0] )
lowercase : Optional[Any] =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowercase : Any =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowercase : Optional[int] =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowercase : Dict =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowercase : List[str] =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : str ) -> Optional[int]:
lowercase , lowercase : List[Any] =emb.weight.shape
lowercase : Optional[Any] =nn.Linear(__magic_name__ , __magic_name__ , bias=__magic_name__ )
lowercase : Any =emb.weight.data
return lin_layer
def _lowerCAmelCase ( __magic_name__ : int ) -> Optional[int]:
with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f:
lowercase : Dict =f.readlines()
lowercase : Optional[int] =[line.split(''' ''' )[0] for line in lines]
lowercase : List[Any] =len(__magic_name__ )
lowercase : List[str] ={
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(__magic_name__ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : int , ) -> List[str]:
lowercase : List[str] =WavaVecaConfig.from_pretrained(__magic_name__ )
lowercase : int =SpeechaTextaConfig.from_pretrained(
__magic_name__ , vocab_size=__magic_name__ , decoder_layers=__magic_name__ , do_stable_layer_norm=__magic_name__ )
lowercase : Optional[int] =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , )
lowercase , lowercase , lowercase : Dict =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
lowercase : Union[str, Any] =model[0].eval()
# set weights for wav2vec2 encoder
lowercase : Union[str, Any] =WavaVecaModel(__magic_name__ )
lowercase : Union[str, Any] =recursively_load_weights_wavaveca(model.encoder , __magic_name__ )
lowercase : Optional[int] =SpeechaTextaForCausalLM(__magic_name__ )
lowercase , lowercase : Union[str, Any] =hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__magic_name__ )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
lowercase : List[Any] =nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
lowercase : Optional[Any] =SpeechEncoderDecoderModel(encoder=__magic_name__ , decoder=__magic_name__ )
lowercase : Tuple =False
# add projection layer
lowercase : Union[str, Any] =nn.Parameter(projection_layer.weight )
lowercase : Dict =nn.Parameter(projection_layer.bias )
lowercase : str =create_vocab_dict(__magic_name__ )
with open(os.path.join(__magic_name__ , '''vocab.json''' ) , '''w''' ) as fp:
json.dump(__magic_name__ , __magic_name__ )
lowercase : Dict =SpeechaTextaTokenizer(os.path.join(__magic_name__ , '''vocab.json''' ) )
tokenizer.save_pretrained(__magic_name__ )
lowercase : Optional[Any] =hf_wavavec.config.to_dict()
lowercase : List[Any] =tokenizer.pad_token_id
lowercase : Any =tokenizer.bos_token_id
lowercase : Any =tokenizer.eos_token_id
lowercase : int ='''speech_to_text_2'''
lowercase : int ='''wav2vec2'''
lowercase : Optional[Any] =SpeechEncoderDecoderConfig.from_dict(__magic_name__ )
hf_wavavec.save_pretrained(__magic_name__ )
feature_extractor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=10224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
UpperCamelCase_ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 92 |
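# A minimal sketch of the embedding-to-linear weight tying performed above: a
# (vocab_size, emb_dim) embedding matrix is reused as the weight of an
# emb_dim -> vocab_size output projection with no bias.
import torch
from torch import nn

emb = nn.Embedding(10, 4)                 # toy vocabulary of 10, hidden size 4
vocab_size, emb_dim = emb.weight.shape
lin_layer = nn.Linear(emb_dim, vocab_size, bias=False)
lin_layer.weight.data = emb.weight.data   # tie the projection to the embedding
logits = lin_layer(torch.randn(1, 4))     # one score per vocabulary entry
assert logits.shape == (1, 10)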
from typing import Any
import numpy as np
def is_hermitian( matrix :np.ndarray ) -> bool:
return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient( a :np.ndarray , v :np.ndarray ) -> Any:
v_star = v.conjugate().T
v_star_dot = v_star.dot(a )
assert isinstance(v_star_dot , np.ndarray )
return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests( ) -> None:
a = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
v = np.array([[1], [2], [3]] )
assert is_hermitian(a ), F"""{a} is not hermitian."""
print(rayleigh_quotient(a , v ) )
a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(a ), F"""{a} is not hermitian."""
assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests() | 67 | 0 |
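# A small numeric check of the classical property behind the final assertion above:
# for a Hermitian matrix, the Rayleigh quotient of any nonzero vector lies between
# the smallest and largest eigenvalues (using rayleigh_quotient as defined above).
import numpy as np

a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
eigenvalues = np.linalg.eigvalsh(a)  # real eigenvalues of a Hermitian matrix
v = np.array([[1.0], [1.0], [1.0]])
r = rayleigh_quotient(a, v).item()
assert eigenvalues.min() - 1e-9 <= r <= eigenvalues.max() + 1e-9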
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = MODEL_FOR_CAUSAL_LM_MAPPING
__magic_name__ :Optional[Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ :Union[str, Any] = text_generator('This is a test' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
lowerCAmelCase__ :Optional[int] = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
__UpperCAmelCase , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
lowerCAmelCase__ :Optional[int] = text_generator('This is a test' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{'generated_token_ids': ANY(__UpperCAmelCase )},
{'generated_token_ids': ANY(__UpperCAmelCase )},
] , )
lowerCAmelCase__ :Optional[int] = text_generator.model.config.eos_token_id
lowerCAmelCase__ :int = '<pad>'
lowerCAmelCase__ :Union[str, Any] = text_generator(
['This is a test', 'This is a second test'] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , )
self.assertEqual(
__UpperCAmelCase , [
[
{'generated_token_ids': ANY(__UpperCAmelCase )},
{'generated_token_ids': ANY(__UpperCAmelCase )},
],
[
{'generated_token_ids': ANY(__UpperCAmelCase )},
{'generated_token_ids': ANY(__UpperCAmelCase )},
],
] , )
@require_tf
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ :str = text_generator('This is a test' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
lowerCAmelCase__ :Union[str, Any] = text_generator(['This is a test', 'This is a second test'] , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
return text_generator, ["This is a test", "Another test"]
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = 'Hello I believe in'
lowerCAmelCase__ :Any = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase__ :Union[str, Any] = text_generator(__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
lowerCAmelCase__ :Optional[int] = text_generator(__UpperCAmelCase , stop_sequence=' fe' )
self.assertEqual(__UpperCAmelCase , [{'generated_text': 'Hello I believe in fe'}] )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = text_generator.model
lowerCAmelCase__ :Dict = text_generator.tokenizer
lowerCAmelCase__ :Any = text_generator('This is a test' )
self.assertEqual(__UpperCAmelCase , [{'generated_text': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCAmelCase__ :Dict = text_generator('This is a test' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'generated_text': ANY(__UpperCAmelCase )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCAmelCase__ :str = pipeline(task='text-generation' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = text_generator('This is a test' )
self.assertEqual(__UpperCAmelCase , [{'generated_text': ANY(__UpperCAmelCase )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCAmelCase__ :Optional[Any] = text_generator('This is a test' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'generated_text': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCAmelCase__ :int = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'generated_text': ANY(__UpperCAmelCase )}, {'generated_text': ANY(__UpperCAmelCase )}],
[{'generated_text': ANY(__UpperCAmelCase )}, {'generated_text': ANY(__UpperCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCAmelCase__ :Optional[Any] = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'generated_text': ANY(__UpperCAmelCase )}, {'generated_text': ANY(__UpperCAmelCase )}],
[{'generated_text': ANY(__UpperCAmelCase )}, {'generated_text': ANY(__UpperCAmelCase )}],
] , )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ :List[Any] = text_generator('test' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ :Union[str, Any] = text_generator('test' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ :str = text_generator('test' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCAmelCase__ :Dict = text_generator('' )
self.assertEqual(__UpperCAmelCase , [{'generated_text': ANY(__UpperCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCAmelCase__ :str = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCAmelCase__ :int = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 5_0_0 , max_new_tokens=2_0 )
lowerCAmelCase__ :int = text_generator('This is a test' * 5_0_0 , handle_long_generation='hole' , max_new_tokens=2_0 )
# Hole strategy cannot work
with self.assertRaises(__UpperCAmelCase ):
text_generator(
'This is a test' * 5_0_0 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 1_0 , )
@require_torch
@require_accelerate
@require_torch_gpu
def snake_case ( self ):
'''simple docstring'''
import torch
# Classic `model_kwargs`
lowerCAmelCase__ :Optional[int] = pipeline(
model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ :Dict = pipe('This is a test' )
self.assertEqual(
__UpperCAmelCase , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model as they're unlikely to mean anything else.)
lowerCAmelCase__ :Union[str, Any] = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ :Union[str, Any] = pipe('This is a test' )
self.assertEqual(
__UpperCAmelCase , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCAmelCase__ :List[str] = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowerCAmelCase__ :Any = pipe('This is a test' )
self.assertEqual(
__UpperCAmelCase , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def snake_case ( self ):
'''simple docstring'''
import torch
lowerCAmelCase__ :List[str] = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def snake_case ( self ):
'''simple docstring'''
import torch
lowerCAmelCase__ :Union[str, Any] = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa )
pipe('This is a test' , do_sample=__UpperCAmelCase , top_p=0.5 )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = 'Hello world'
lowerCAmelCase__ :Optional[Any] = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
lowerCAmelCase__ :Any = logging.get_logger('transformers.generation.tf_utils' )
else:
lowerCAmelCase__ :List[str] = logging.get_logger('transformers.generation.utils' )
lowerCAmelCase__ :Union[str, Any] = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ :Optional[Any] = text_generator(__UpperCAmelCase , max_length=1_0 , max_new_tokens=1 )
self.assertIn(__UpperCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ :List[str] = text_generator(__UpperCAmelCase , max_new_tokens=1 )
self.assertNotIn(__UpperCAmelCase , cl.out )
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ :Dict = text_generator(__UpperCAmelCase , max_length=1_0 )
self.assertNotIn(__UpperCAmelCase , cl.out )
| 93 |
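# A rough sketch of what the "hole" long-generation handling exercised above does
# conceptually (assumed behavior, not the library's actual implementation): keep only
# the most recent prompt tokens so that prompt length + max_new_tokens fits the context.
def truncate_for_hole(input_ids: list, model_max_length: int, max_new_tokens: int) -> list:
    keep = model_max_length - max_new_tokens
    if keep <= 0:
        raise ValueError("max_new_tokens does not fit within the model context")
    return input_ids[-keep:]

assert truncate_for_hole(list(range(100)), model_max_length=32, max_new_tokens=8) == list(range(76, 100))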
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple ,__A : Dict ,__A : List[Any]=7 ,__A : Dict=3 ,__A : Tuple=30 ,__A : Dict=400 ,__A : Any=True ,__A : List[Any]=None ,__A : Any=True ,__A : List[str]=[0.5, 0.5, 0.5] ,__A : Union[str, Any]=[0.5, 0.5, 0.5] ,__A : int=True ,__A : List[str]=1 / 255 ,__A : Union[str, Any]=True ,) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowercase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
_lowercase = parent
_lowercase = batch_size
_lowercase = num_channels
_lowercase = min_resolution
_lowercase = max_resolution
_lowercase = do_resize
_lowercase = size
_lowercase = do_normalize
_lowercase = image_mean
_lowercase = image_std
_lowercase = do_rescale
_lowercase = rescale_factor
_lowercase = do_pad
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCAmelCase ( self : Tuple ,__A : Union[str, Any] ,__A : List[str]=False ) -> Union[str, Any]:
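        # Mirrors the processor's shortest-edge resize: the shorter image side is
        # scaled to size["shortest_edge"] and the longer side by the same ratio
        # (the longest_edge cap is deliberately out of reach in these tests).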
if not batched:
_lowercase = image_inputs[0]
if isinstance(__A ,Image.Image ):
_lowercase , _lowercase = image.size
else:
_lowercase , _lowercase = image.shape[1], image.shape[2]
if w < h:
_lowercase = int(self.size['shortest_edge'] * h / w )
_lowercase = self.size['shortest_edge']
elif w > h:
_lowercase = self.size['shortest_edge']
_lowercase = int(self.size['shortest_edge'] * w / h )
else:
_lowercase = self.size['shortest_edge']
_lowercase = self.size['shortest_edge']
else:
_lowercase = []
for image in image_inputs:
_lowercase , _lowercase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowercase = max(__A ,key=lambda __A : item[0] )[0]
_lowercase = max(__A ,key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A_ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
_lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A ,'image_mean' ) )
self.assertTrue(hasattr(__A ,'image_std' ) )
self.assertTrue(hasattr(__A ,'do_normalize' ) )
self.assertTrue(hasattr(__A ,'do_resize' ) )
self.assertTrue(hasattr(__A ,'do_rescale' ) )
self.assertTrue(hasattr(__A ,'do_pad' ) )
self.assertTrue(hasattr(__A ,'size' ) )
def __UpperCAmelCase ( self : str ) -> List[str]:
_lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad ,__A )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
pass
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A ,Image.Image )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,np.ndarray )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,torch.Tensor )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
# prepare image and target
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'image_id': 3_9769, 'annotations': target}
# encode them
_lowercase = DetaImageProcessor()
_lowercase = image_processing(images=__A ,annotations=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# prepare image, target and masks_path
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
_lowercase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_lowercase = DetaImageProcessor(format='coco_panoptic' )
_lowercase = image_processing(images=__A ,annotations=__A ,masks_path=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify masks
_lowercase = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,__A )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) )
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict=13 , UpperCAmelCase : Optional[Any]=7 , UpperCAmelCase : Tuple=True , UpperCAmelCase : Dict=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=99 , UpperCAmelCase : Any=32 , UpperCAmelCase : int=5 , UpperCAmelCase : Tuple=4 , UpperCAmelCase : Dict=37 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Any=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : List[str]=512 , UpperCAmelCase : List[str]=16 , UpperCAmelCase : str=2 , UpperCAmelCase : int=0.0_2 , UpperCAmelCase : int=4 , ) -> Optional[Any]:
'''simple docstring'''
lowercase : int =parent
lowercase : Tuple =batch_size
lowercase : List[Any] =seq_length
lowercase : List[Any] =is_training
lowercase : str =use_attention_mask
lowercase : str =use_token_type_ids
lowercase : Optional[Any] =use_labels
lowercase : Optional[Any] =vocab_size
lowercase : Dict =hidden_size
lowercase : Optional[int] =num_hidden_layers
lowercase : Any =num_attention_heads
lowercase : Dict =intermediate_size
lowercase : int =hidden_act
lowercase : List[str] =hidden_dropout_prob
lowercase : Optional[Any] =attention_probs_dropout_prob
lowercase : Any =max_position_embeddings
lowercase : Any =type_vocab_size
lowercase : Tuple =type_sequence_label_size
lowercase : int =initializer_range
lowercase : int =num_choices
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[Any] =None
if self.use_attention_mask:
lowercase : Tuple =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Union[str, Any] =None
if self.use_token_type_ids:
lowercase : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : Union[str, Any] =AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def A__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[int] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : int =config_and_inputs
lowercase : int ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
lowercase : Optional[int] =FlaxAlbertModelTester(self )
@slow
def A__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase : int =model_class_name.from_pretrained('''albert-base-v2''' )
lowercase : Optional[Any] =model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
lowercase : List[str] =FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
lowercase : Tuple =np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase : str =np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase : Dict =model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
lowercase : Any =(1, 11, 768)
self.assertEqual(output.shape , UpperCAmelCase )
lowercase : int =np.array(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1e-4 ) )
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
snake_case = False
snake_case = False
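# Factory passed to argparse via set_defaults(func=...); builds the command from parsed args.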
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Namespace ) -> Tuple:
return TrainCommand(snake_case__ )
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def __UpperCAmelCase ( __A : ArgumentParser ) -> List[Any]:
_lowercase = parser.add_parser('train' ,help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' ,type=__A ,required=__A ,help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' ,)
train_parser.add_argument(
'--column_label' ,type=__A ,default=0 ,help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' ,type=__A ,default=1 ,help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' ,type=__A ,default=2 ,help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' ,action='store_true' ,help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' ,type=__A ,default='' ,help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' ,type=__A ,default=0.1 ,help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' ,)
train_parser.add_argument('--output' ,type=__A ,default='./' ,help='path to saved the trained model.' )
train_parser.add_argument(
'--task' ,type=__A ,default='text_classification' ,help='Task to train the model on.' )
train_parser.add_argument(
'--model' ,type=__A ,default='bert-base-uncased' ,help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' ,type=__A ,default=32 ,help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' ,type=__A ,default=64 ,help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' ,type=__A ,default=3e-5 ,help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' ,type=__A ,default=1e-08 ,help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self : Optional[Any] ,__A : Namespace ) -> Tuple:
_lowercase = logging.get_logger('transformers-cli/training' )
_lowercase = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output ,exist_ok=__A )
_lowercase = args.output
_lowercase = args.column_label
_lowercase = args.column_text
_lowercase = args.column_id
self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
_lowercase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"""Loading dataset from {args.train_data}""" )
_lowercase = Processor.create_from_csv(
args.train_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
_lowercase = None
if args.validation_data:
self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
_lowercase = Processor.create_from_csv(
args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
_lowercase = args.validation_split
_lowercase = args.train_batch_size
_lowercase = args.valid_batch_size
_lowercase = args.learning_rate
_lowercase = args.adam_epsilon
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
raise NotImplementedError
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
self.pipeline.fit(
self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,)
# Save trained pipeline
        self.pipeline.save_pretrained(self.output )
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
lowerCamelCase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
lowerCamelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCamelCase_ :
__magic_name__ = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
__magic_name__ = field(
default=__A , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__magic_name__ = field(
default=__A , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , )
__magic_name__ = field(default=__A , metadata={'''help''': '''A folder containing the training data.'''} )
__magic_name__ = field(default=__A , metadata={'''help''': '''A folder containing the validation data.'''} )
__magic_name__ = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
__magic_name__ = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} )
__magic_name__ = field(
default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , )
__magic_name__ = field(
default=__A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__magic_name__ = field(
default=__A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = {}
if self.train_dir is not None:
UpperCAmelCase_ : int = self.train_dir
if self.validation_dir is not None:
UpperCAmelCase_ : Optional[int] = self.validation_dir
UpperCAmelCase_ : str = data_files if data_files else None
@dataclass
class UpperCamelCase_ :
__magic_name__ = field(
default=__A , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '''
'''checkpoint identifier on the hub. '''
'''Don\'t set if you want to train a model from scratch.'''
)
} , )
__magic_name__ = field(
default=__A , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__A )} , )
__magic_name__ = field(
default=__A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__magic_name__ = field(
default=__A , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
__magic_name__ = field(
default=__A , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , )
__magic_name__ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__magic_name__ = field(default=__A , metadata={'''help''': '''Name or path of preprocessor config.'''} )
__magic_name__ = field(
default=__A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
__magic_name__ = field(
default=__A , metadata={
'''help''': (
'''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'''
)
} , )
__magic_name__ = field(
default=__A , metadata={
'''help''': (
'''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'''
)
} , )
__magic_name__ = field(
default=__A , metadata={'''help''': '''Stride to use for the encoder.'''} , )
class UpperCamelCase_ :
def __init__( self : Tuple , lowerCAmelCase_ : Any=192 , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : Tuple=4 , lowerCAmelCase_ : List[Any]=0.6 ) -> Optional[Any]:
UpperCAmelCase_ : Optional[int] = input_size
UpperCAmelCase_ : str = mask_patch_size
UpperCAmelCase_ : Optional[int] = model_patch_size
UpperCAmelCase_ : Dict = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("Input size must be divisible by mask patch size" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("Mask patch size must be divisible by model patch size" )
UpperCAmelCase_ : Dict = self.input_size // self.mask_patch_size
UpperCAmelCase_ : Any = self.mask_patch_size // self.model_patch_size
UpperCAmelCase_ : Any = self.rand_size**2
UpperCAmelCase_ : Union[str, Any] = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self : str ) -> Union[str, Any]:
UpperCAmelCase_ : int = np.random.permutation(self.token_count )[: self.mask_count]
UpperCAmelCase_ : int = np.zeros(self.token_count , dtype=lowerCAmelCase_ )
UpperCAmelCase_ : Dict = 1
UpperCAmelCase_ : Dict = mask.reshape((self.rand_size, self.rand_size) )
UpperCAmelCase_ : Dict = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
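# Illustrative arithmetic (not from the original script): with the defaults above
# (input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6),
# rand_size = 192 // 32 = 6, scale = 32 // 4 = 8, token_count = 6 ** 2 = 36 and
# mask_count = ceil(36 * 0.6) = 22. After the repeats, the flattened mask has
# (6 * 8) ** 2 = 2304 entries; one per model patch of the 192 x 192 input.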
def snake_case ( A__ ):
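    # Stacks the per-example tensors into batch tensors: pixel_values has shape
    # (batch, channels, height, width) and bool_masked_pos marks masked patches.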
UpperCAmelCase_ : Optional[int] = torch.stack([example["pixel_values"] for example in examples] )
UpperCAmelCase_ : List[Any] = torch.stack([example["mask"] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase_ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mim" ,A__ ,A__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase_ : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(A__ )
transformers.utils.logging.set_verbosity(A__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCAmelCase_ : Optional[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase_ : Tuple = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
UpperCAmelCase_ : List[str] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,data_files=data_args.data_files ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
# If we don't have a validation split, split off a percentage of train as validation.
UpperCAmelCase_ : Union[str, Any] = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split ,A__ ) and data_args.train_val_split > 0.0:
UpperCAmelCase_ : Optional[Any] = ds["train"].train_test_split(data_args.train_val_split )
UpperCAmelCase_ : List[str] = split["train"]
UpperCAmelCase_ : Union[str, Any] = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ : List[str] = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
UpperCAmelCase_ : Any = AutoConfig.from_pretrained(model_args.config_name_or_path ,**A__ )
elif model_args.model_name_or_path:
UpperCAmelCase_ : Any = AutoConfig.from_pretrained(model_args.model_name_or_path ,**A__ )
else:
UpperCAmelCase_ : Optional[int] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(A__ ,"decoder_type" ):
UpperCAmelCase_ : Tuple = "simmim"
# adapt config
UpperCAmelCase_ : Optional[int] = model_args.image_size if model_args.image_size is not None else config.image_size
UpperCAmelCase_ : int = model_args.patch_size if model_args.patch_size is not None else config.patch_size
UpperCAmelCase_ : Optional[Any] = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"image_size": model_args.image_size,
"patch_size": model_args.patch_size,
"encoder_stride": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained(model_args.image_processor_name ,**A__ )
elif model_args.model_name_or_path:
UpperCAmelCase_ : List[str] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path ,**A__ )
else:
UpperCAmelCase_ : Dict = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
UpperCAmelCase_ : Tuple = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
UpperCAmelCase_ : Union[str, Any] = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(".ckpt" in model_args.model_name_or_path ) ,config=A__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
logger.info("Training new model from scratch" )
UpperCAmelCase_ : List[str] = AutoModelForMaskedImageModeling.from_config(A__ )
if training_args.do_train:
UpperCAmelCase_ : List[str] = ds["train"].column_names
else:
UpperCAmelCase_ : str = ds["validation"].column_names
if data_args.image_column_name is not None:
UpperCAmelCase_ : Any = data_args.image_column_name
elif "image" in column_names:
UpperCAmelCase_ : Tuple = "image"
elif "img" in column_names:
UpperCAmelCase_ : List[str] = "img"
else:
UpperCAmelCase_ : Optional[Any] = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
UpperCAmelCase_ : Optional[int] = Compose(
[
Lambda(lambda A__ : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size ,scale=(0.67, 1.0) ,ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean ,std=image_processor.image_std ),
] )
# create mask generator
UpperCAmelCase_ : Optional[Any] = MaskGenerator(
input_size=model_args.image_size ,mask_patch_size=data_args.mask_patch_size ,model_patch_size=model_args.patch_size ,mask_ratio=data_args.mask_ratio ,)
def preprocess_images(A__ ):
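        # Applies the SimMIM-style augmentations to every image in the batch and
        # draws one independent random mask per example.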
UpperCAmelCase_ : Union[str, Any] = [transforms(A__ ) for image in examples[image_column_name]]
UpperCAmelCase_ : Any = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
UpperCAmelCase_ : Optional[Any] = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(A__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
UpperCAmelCase_ : List[str] = (
ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(A__ )
# Initialize our trainer
UpperCAmelCase_ : List[Any] = Trainer(
model=A__ ,args=A__ ,train_dataset=ds["train"] if training_args.do_train else None ,eval_dataset=ds["validation"] if training_args.do_eval else None ,tokenizer=A__ ,data_collator=A__ ,)
# Training
if training_args.do_train:
UpperCAmelCase_ : List[str] = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase_ : int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase_ : int = last_checkpoint
UpperCAmelCase_ : Any = trainer.train(resume_from_checkpoint=A__ )
trainer.save_model()
trainer.log_metrics("train" ,train_result.metrics )
trainer.save_metrics("train" ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase_ : Optional[Any] = trainer.evaluate()
trainer.log_metrics("eval" ,A__ )
trainer.save_metrics("eval" ,A__ )
# Write model card and (optionally) push to hub
UpperCAmelCase_ : int = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "masked-image-modeling",
"dataset": data_args.dataset_name,
"tags": ["masked-image-modeling"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**A__ )
else:
trainer.create_model_card(**A__ )
if __name__ == "__main__":
main()
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any ) -> str:
_lowercase = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
_lowercase = 1024
_lowercase = 4096
_lowercase = 24
_lowercase = 16
_lowercase = [5, 11, 17, 23]
_lowercase = [256, 512, 1024, 1024]
_lowercase = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = [256, 512, 768, 768]
_lowercase = 150
_lowercase = 16
_lowercase = (1, 384, 384)
_lowercase = False
_lowercase = 'project'
if "ade" in checkpoint_url:
_lowercase = True
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = 150
_lowercase = 16
_lowercase = 'huggingface/label-files'
_lowercase = 'ade20k-id2label.json'
_lowercase = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='dataset' ) ) , 'r' ) )
_lowercase = {int(snake_case__ ): v for k, v in idalabel.items()}
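        # idalabel maps integer class ids to ADE20K label names (e.g. 0 -> "wall");
        # the reverse mapping below gives label -> id.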
_lowercase = idalabel
_lowercase = {v: k for k, v in idalabel.items()}
_lowercase = [1, 150, 480, 480]
return config, expected_shape
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> str:
_lowercase = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[Any] ) -> Any:
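    # Worked example (illustrative): "pretrained.model.blocks.0.attn.proj.weight"
    # passes through the rules below and comes out as
    # "dpt.encoder.layer.0.attention.output.dense.weight".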
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowercase = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
_lowercase = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
_lowercase = name.replace('patch_embed' , '' )
if "pos_embed" in name:
_lowercase = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
_lowercase = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
_lowercase = name.replace('proj' , 'projection' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
_lowercase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
_lowercase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
_lowercase = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
_lowercase = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
_lowercase = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
_lowercase = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
_lowercase = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
_lowercase = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
_lowercase = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
_lowercase = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_lowercase = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
_lowercase = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_lowercase = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_lowercase = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_lowercase = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_lowercase = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowercase = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_lowercase = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_lowercase = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_lowercase = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_lowercase = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_lowercase = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_lowercase = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_lowercase = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_lowercase = name.replace('bn' , 'batch_norm' )
if "head" in name:
_lowercase = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_lowercase = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_lowercase = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
_lowercase = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
_lowercase = name.replace('..' , '.' )
if "stem.conv" in name:
_lowercase = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
_lowercase = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
_lowercase = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
_lowercase = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
_lowercase = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
_lowercase = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] , snake_case__ :int ) -> Dict:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
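        # in_proj_weight has shape (3 * hidden_size, hidden_size): the first, middle
        # and last thirds of the rows are the query, key and value projections.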
_lowercase = in_proj_weight[: config.hidden_size, :]
_lowercase = in_proj_bias[: config.hidden_size]
_lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase = in_proj_weight[
-config.hidden_size :, :
]
_lowercase = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
_lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :List[Any] , snake_case__ :str , snake_case__ :Any , snake_case__ :List[str] ) -> str:
_lowercase , _lowercase = get_dpt_config(snake_case__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_lowercase = torch.load(snake_case__ , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(snake_case__ )
# rename keys
for key in state_dict.copy().keys():
_lowercase = state_dict.pop(snake_case__ )
_lowercase = val
# read in qkv matrices
read_in_q_k_v(snake_case__ , snake_case__ )
# load HuggingFace model
_lowercase = DPTForSemanticSegmentation(snake_case__ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# Check outputs on an image
_lowercase = 480 if 'ade' in checkpoint_url else 384
_lowercase = DPTImageProcessor(size=snake_case__ )
_lowercase = prepare_img()
_lowercase = image_processor(snake_case__ , return_tensors='pt' )
# forward pass
_lowercase = model(**snake_case__ ).logits if 'ade' in checkpoint_url else model(**snake_case__ ).predicted_depth
if show_prediction:
_lowercase = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=snake_case__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
snake_case = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class __A ( SCREAMING_SNAKE_CASE_ ):
# to overwrite at feature extractactor specific tests
UpperCAmelCase__ = None
UpperCAmelCase__ = None
@property
def lowerCamelCase__ ( self : Dict ) -> Any:
return self.feat_extract_tester.prepare_feat_extract_dict()
def lowerCamelCase__ ( self : List[Any] ) -> Any:
__magic_name__: Dict = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__snake_case , """feature_size""" ) )
self.assertTrue(hasattr(__snake_case , """sampling_rate""" ) )
self.assertTrue(hasattr(__snake_case , """padding_value""" ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
__magic_name__: List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
__magic_name__: List[str] = self.feature_extraction_class(**self.feat_extract_dict )
__magic_name__: str = feat_extract.model_input_names[0]
__magic_name__: Any = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(x ) == len(y ) for x, y in zip(__snake_case , processed_features[input_name] ) ) )
__magic_name__: List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__snake_case )
__magic_name__: str = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
__magic_name__: List[str] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__magic_name__: Optional[int] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
__magic_name__: Tuple = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__snake_case )
__magic_name__: Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
__magic_name__: Dict = feat_extract.model_input_names[0]
__magic_name__: Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
__magic_name__: Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__magic_name__: int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[int]:
__magic_name__: Dict = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__snake_case )
__magic_name__: Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
__magic_name__: Optional[Any] = feat_extract.model_input_names[0]
__magic_name__: Tuple = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" )
__magic_name__: Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__magic_name__: Any = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : Optional[Any]=False ) -> Any:
def _inputs_have_equal_length(__snake_case : int ):
__magic_name__: Union[str, Any] = len(input[0] )
for input_slice in input[1:]:
if len(__snake_case ) != length:
return False
return True
def _inputs_are_equal(__snake_case : Tuple , __snake_case : List[str] ):
if len(__snake_case ) != len(__snake_case ):
return False
for input_slice_a, input_slice_a in zip(__snake_case , __snake_case ):
if not np.allclose(np.asarray(__snake_case ) , np.asarray(__snake_case ) , atol=1E-3 ):
return False
return True
__magic_name__: str = self.feature_extraction_class(**self.feat_extract_dict )
__magic_name__: Any = self.feat_extract_tester.prepare_inputs_for_common(numpify=__snake_case )
__magic_name__: Tuple = feat_extract.model_input_names[0]
__magic_name__: Tuple = BatchFeature({input_name: speech_inputs} )
__magic_name__: List[str] = self.feat_extract_tester.seq_length_diff
__magic_name__: Optional[Any] = self.feat_extract_tester.max_seq_length + pad_diff
__magic_name__: Tuple = self.feat_extract_tester.min_seq_length
__magic_name__: List[str] = self.feat_extract_tester.batch_size
__magic_name__: int = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__magic_name__: Union[str, Any] = feat_extract.pad(__snake_case , padding=__snake_case )
__magic_name__: Optional[Any] = input_a[input_name]
__magic_name__: str = feat_extract.pad(__snake_case , padding="""longest""" )
__magic_name__: str = input_a[input_name]
__magic_name__: Optional[int] = feat_extract.pad(__snake_case , padding="""max_length""" , max_length=len(speech_inputs[-1] ) )
__magic_name__: Any = input_a[input_name]
__magic_name__: str = feat_extract.pad(__snake_case , padding="""longest""" , return_tensors="""np""" )
__magic_name__: Any = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(__snake_case ):
feat_extract.pad(__snake_case , padding="""max_length""" )[input_name]
__magic_name__: Union[str, Any] = feat_extract.pad(
__snake_case , padding="""max_length""" , max_length=__snake_case , return_tensors="""np""" )
__magic_name__: Tuple = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(__snake_case ) )
self.assertTrue(_inputs_have_equal_length(__snake_case ) )
self.assertTrue(_inputs_have_equal_length(__snake_case ) )
self.assertTrue(_inputs_are_equal(__snake_case , __snake_case ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
__magic_name__: Optional[int] = feat_extract.pad(__snake_case , pad_to_multiple_of=1_0 )
__magic_name__: int = input_a[input_name]
__magic_name__: Tuple = feat_extract.pad(__snake_case , padding="""longest""" , pad_to_multiple_of=1_0 )
__magic_name__: List[Any] = input_a[input_name]
__magic_name__: Tuple = feat_extract.pad(
__snake_case , padding="""max_length""" , pad_to_multiple_of=1_0 , max_length=__snake_case )
__magic_name__: int = input_a[input_name]
__magic_name__: Tuple = feat_extract.pad(
__snake_case , padding="""max_length""" , pad_to_multiple_of=1_0 , max_length=__snake_case , return_tensors="""np""" , )
__magic_name__: List[Any] = input_a[input_name]
        self.assertTrue(all(len(x ) % 1_0 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(__snake_case , __snake_case ) )
__magic_name__: Tuple = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0
        self.assertTrue(all(len(x ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
__magic_name__: Optional[int] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
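        # each padded frame contributes feature_size * padding_value, so the summed
        # tail of a padded example equals padding_vector_sum * (number of padded frames)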
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_1 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertTrue(input_1.shape[1] == len(speech_inputs[0]))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to middle
        input_1 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_3 = input_3[input_name]

        self.assertTrue(input_1.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_are_equal(input_1, input_2))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_3))
        self.assertTrue(len(input_3[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_1 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_2 = input_2[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_1[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})

        max_length = min(input_lengths)
        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
| 96 |
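# The `pad_to_multiple_of` checks above rely on a simple ceiling rule for the
# padded length. A standalone sketch of that rule (the helper name is ours,
# not part of the transformers API):
def round_up_to_multiple(length: int, multiple: int) -> int:
    """Round `length` up to the nearest multiple of `multiple`."""
    if length % multiple == 0:
        return length
    return ((length // multiple) + 1) * multiple


assert round_up_to_multiple(25, 12) == 36
assert round_up_to_multiple(24, 12) == 24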
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 67 | 0 |
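# For context, a stripped-down version of the lazy-import mechanism used above.
# transformers' `_LazyModule` is more featureful; this sketch (with our own
# `MiniLazyModule` name) only shows the core idea: resolve a symbol to its
# submodule on first attribute access.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [symbols]} into {symbol: submodule}
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{module_name}", self.__name__)
        return getattr(submodule, attr)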
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings. Combines the image and text embeddings into a format usable by the decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim: int,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
| 97 |
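# A usage sketch for the projection module above. The embedding sizes (1536,
# 1280) and the 77-token sequence length are illustrative, not taken from any
# particular checkpoint.
import torch

proj = UnCLIPTextProjModel(
    clip_extra_context_tokens=4,
    clip_embeddings_dim=768,
    time_embed_dim=1536,
    cross_attention_dim=1280,
)

image_embeddings = torch.randn(2, 768)  # pooled CLIP image embeddings
prompt_embeds = torch.randn(2, 768)  # pooled CLIP text embeddings
text_encoder_hidden_states = torch.randn(2, 77, 768)  # per-token text states

hidden_states, time_embeddings = proj(
    image_embeddings=image_embeddings,
    prompt_embeds=prompt_embeds,
    text_encoder_hidden_states=text_encoder_hidden_states,
    do_classifier_free_guidance=False,
)

assert hidden_states.shape == (2, 4 + 77, 1280)  # 4 extra context tokens prepended
assert time_embeddings.shape == (2, 1536)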
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline | 67 | 0 |
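# The guarded-import pattern above swaps real classes for "dummy objects" that
# raise a helpful error when used. A minimal sketch of the same idea, with
# made-up names (`fastlib`, `FastPipeline`) purely for illustration:
try:
    import fastlib  # noqa: F401  (hypothetical optional dependency)

    _fastlib_available = True
except ImportError:
    _fastlib_available = False


class FastPipeline:
    """Fails at *use* time with a clear message instead of at import time."""

    def __init__(self):
        if not _fastlib_available:
            raise ImportError("FastPipeline requires the optional `fastlib` package.")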
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")

        # If instances were shared, the attribute set on act1 would leak to act2
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 98 |
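# Quick demonstration of the clipped activation tested above: `gelu_10`
# behaves like gelu but saturates at 10, so large positive inputs are capped.
import torch
from transformers.activations import get_activation

x = torch.tensor([-5.0, 0.0, 5.0, 50.0])
gelu = get_activation("gelu")
gelu_10 = get_activation("gelu_10")

print(gelu(x)[-1].item())  # 50.0 -- gelu is ~identity for large positive x
print(gelu_10(x)[-1].item())  # 10.0 -- clipped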
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        ) | 67 | 0 |
import argparse
import shlex

import runhouse as rh


if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument('--user', type=str, default='ubuntu')
    parser.add_argument('--host', type=str, default='localhost')
    parser.add_argument('--key_path', type=str, default=None)
    parser.add_argument('--instance', type=str, default='V100:1')
    parser.add_argument('--provider', type=str, default='cheapest')
    parser.add_argument('--use_spot', type=bool, default=False)
    parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError('Cannot specify both BYO and on-demand cluster args')
        cluster = rh.cluster(
            name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
        )
    else:
        cluster = rh.cluster(
            name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit('/', 1)[0]

    # Set up remote environment
    cluster.install_packages(['pip:./'])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
    cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
| 99 |
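# The `shlex.quote` call in the script above protects forwarded CLI arguments
# containing spaces from being word-split by the remote shell:
import shlex

unknown = ["--prompt", "a photo of a cat", "--length", "20"]
print(" ".join(shlex.quote(arg) for arg in unknown))
# --prompt 'a photo of a cat' --length 20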
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type | 67 | 0 |
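# Usage sketch: a small, untrained DPR context encoder. `projection_dim=128`
# adds a final projection on top of the pooled output (0 disables it).
from transformers import DPRConfig, DPRContextEncoder

config = DPRConfig(num_hidden_layers=4, projection_dim=128)
model = DPRContextEncoder(config)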
import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 100 |
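# The `get_dummy_inputs` helper above needs device-aware seeding; a standalone
# sketch of that idiom (MPS lacks device-bound generators in older PyTorch
# releases, so it falls back to seeding the global RNG):
import torch


def make_generator(device, seed: int = 0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)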
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)


if __name__ == "__main__":
    main() | 67 | 0 |
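# For comparison, a single-process reference implementation of odd-even
# transposition sort: alternate even/odd passes, swapping adjacent
# out-of-order pairs; n passes always suffice for n elements.
def odd_even_transposition_sequential(arr: list) -> list:
    n = len(arr)
    for pass_index in range(n):
        for i in range(pass_index % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_sequential([5, 3, 4, 1, 2]) == [1, 2, 3, 4, 5]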