code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
class UpperCAmelCase_ :
def __init__( self , a , a ) -> Union[str, Any]:
lowercase__ , lowercase__ : Any = text, pattern
lowercase__ , lowercase__ : List[str] = len(a ), len(a )
def _UpperCAmelCase ( self , a ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def _UpperCAmelCase ( self , a ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def _UpperCAmelCase ( self ) -> list[int]:
# searches pattern in text and returns index positions
lowercase__ : Union[str, Any] = []
for i in range(self.textLen - self.patLen + 1 ):
lowercase__ : Optional[int] = self.mismatch_in_text(a )
if mismatch_index == -1:
positions.append(a )
else:
lowercase__ : List[str] = self.match_in_pattern(self.text[mismatch_index] )
lowercase__ : List[Any] = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
_UpperCamelCase : int = "ABAABA"
_UpperCamelCase : Optional[int] = "AB"
_UpperCamelCase : List[str] = BoyerMooreSearch(text, pattern)
_UpperCamelCase : Dict = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
| 645
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCamelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
for attribute in key.split('.' ):
lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
lowercase__ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowercase__ : Dict = value
elif weight_type == "weight_v":
lowercase__ : List[str] = value
elif weight_type == "bias":
lowercase__ : Optional[Any] = value
else:
lowercase__ : List[str] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = []
lowercase__ : List[str] = fairseq_model.state_dict()
lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ : int = True
if "*" in mapped_key:
lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' )[-2]
lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
lowercase__ : List[Any] = 'weight_g'
elif "weight_v" in name:
lowercase__ : int = 'weight_v'
elif "bias" in name:
lowercase__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = 'weight'
else:
lowercase__ : int = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = full_name.split('conv_layers.' )[-1]
lowercase__ : int = name.split('.' )
lowercase__ : int = int(items[0] )
lowercase__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase )
else:
lowercase__ : Any = UniSpeechSatConfig()
lowercase__ : Union[str, Any] = ''
if is_finetuned:
lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase )
else:
lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCamelCase : str = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 645
| 1
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : int = 400_0000 ):
'''simple docstring'''
lowercase__ : Optional[Any] = [0, 1]
lowercase__ : Dict = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowercase__ : Tuple = 0
for j in range(len(_lowerCAmelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 645
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int:
lowercase__ : int = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Dict = image_size
lowercase__ : str = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = embed_dim
lowercase__ : Any = depths
lowercase__ : Dict = num_heads
lowercase__ : List[str] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Tuple = qkv_bias
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = drop_path_rate
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[Any] = use_absolute_embeddings
lowercase__ : Optional[Any] = patch_norm
lowercase__ : Any = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : List[str] = is_training
lowercase__ : int = scope
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : List[str] = encoder_stride
lowercase__ : Optional[Any] = out_features
lowercase__ : Dict = out_indices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Tuple = MaskFormerSwinModel(config=a )
model.to(a )
model.eval()
lowercase__ : str = model(a )
lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(a ):
lowercase__ : Dict = ['stem']
lowercase__ : List[str] = MaskFormerSwinBackbone(config=a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase__ : str = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = MaskFormerSwinModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> str:
return
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a )
@unittest.skip('Swin does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
lowercase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple:
lowercase__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[Any] = outputs.hidden_states
lowercase__ : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swin has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = 3
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : int = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(a ):
lowercase__ : Union[str, Any] = 0
return t
def check_equivalence(a , a , a , a={} ):
with torch.no_grad():
lowercase__ : Optional[Any] = model(**a , return_dict=a , **a )
lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple()
def recursive_check(a , a ):
if isinstance(a , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a , a ):
recursive_check(a , a )
elif isinstance(a , a ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(a , a )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has"""
f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}."""
) , )
recursive_check(a , a )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
model.to(a )
model.eval()
lowercase__ : Tuple = self._prepare_for_class(a , a )
lowercase__ : Optional[Any] = self._prepare_for_class(a , a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a )
lowercase__ : int = self._prepare_for_class(a , a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _a):
lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[int] = MaskFormerSwinModelTester(self )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
lowercase__ : Optional[Any] = backbone_class(a )
backbone.to(a )
backbone.eval()
lowercase__ : Union[str, Any] = backbone(**a )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , a )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowercase__ : List[str] = backbone(**a , output_hidden_states=a )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowercase__ , lowercase__ , lowercase__ : int = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowercase__ : List[Any] = backbone(**a , output_attentions=a )
self.assertIsNotNone(outputs.attentions )
| 645
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]:
lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0}
lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
lowercase__ : Optional[int] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : str = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = min_resolution
lowercase__ : int = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : str = do_center_crop
lowercase__ : List[Any] = crop_size
lowercase__ : Union[str, Any] = do_flip_channel_order
def _UpperCAmelCase ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Tuple = MobileViTImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_center_crop' ) )
self.assertTrue(hasattr(a , 'center_crop' ) )
self.assertTrue(hasattr(a , 'do_flip_channel_order' ) )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processing
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 645
|
"""simple docstring"""
import math
def a_ ( _lowerCAmelCase : int = 100 ):
'''simple docstring'''
lowercase__ : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) )
lowercase__ : str = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
| 645
| 1
|
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
_UpperCamelCase : str = logging.get_logger(__name__)
_UpperCamelCase : Optional[int] = TypeVar("DatasetType", Dataset, IterableDataset)
def a_ ( _lowerCAmelCase : List[DatasetType] , _lowerCAmelCase : Optional[List[float]] = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[DatasetInfo] = None , _lowerCAmelCase : Optional[NamedSplit] = None , _lowerCAmelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(_lowerCAmelCase ):
if not isinstance(_lowerCAmelCase , (Dataset, IterableDataset) ):
if isinstance(_lowerCAmelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(_lowerCAmelCase )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_lowerCAmelCase ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_lowerCAmelCase ).__name__}.""" )
if i == 0:
lowercase__ , lowercase__ : Union[str, Any] = (
(Dataset, IterableDataset) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , info=_lowerCAmelCase , split=_lowerCAmelCase , stopping_strategy=_lowerCAmelCase )
else:
return _interleave_iterable_datasets(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , info=_lowerCAmelCase , split=_lowerCAmelCase , stopping_strategy=_lowerCAmelCase )
def a_ ( _lowerCAmelCase : List[DatasetType] , _lowerCAmelCase : Optional[DatasetInfo] = None , _lowerCAmelCase : Optional[NamedSplit] = None , _lowerCAmelCase : int = 0 , ):
'''simple docstring'''
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(_lowerCAmelCase ):
if not isinstance(_lowerCAmelCase , (Dataset, IterableDataset) ):
if isinstance(_lowerCAmelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(_lowerCAmelCase )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_lowerCAmelCase ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_lowerCAmelCase ).__name__}.""" )
if i == 0:
lowercase__ , lowercase__ : Dict = (
(Dataset, IterableDataset) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_lowerCAmelCase , info=_lowerCAmelCase , split=_lowerCAmelCase , axis=_lowerCAmelCase )
else:
return _concatenate_iterable_datasets(_lowerCAmelCase , info=_lowerCAmelCase , split=_lowerCAmelCase , axis=_lowerCAmelCase )
| 645
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : List[Any] = controlnet_params
lowercase__ : int = 'bird'
lowercase__ : List[Any] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
lowercase__ : List[Any] = jax.random.PRNGKey(0 )
lowercase__ : Tuple = jax.random.split(a , jax.device_count() )
lowercase__ : str = replicate(a )
lowercase__ : List[str] = shard(a )
lowercase__ : Dict = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : Optional[Any] = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : Optional[Any] = controlnet_params
lowercase__ : List[Any] = 'Chef in the kitchen'
lowercase__ : List[str] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
lowercase__ : List[str] = jax.random.PRNGKey(0 )
lowercase__ : str = jax.random.split(a , jax.device_count() )
lowercase__ : Optional[Any] = replicate(a )
lowercase__ : Optional[Any] = shard(a )
lowercase__ : List[Any] = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : str = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 645
| 1
|
"""simple docstring"""
from __future__ import annotations
class UpperCAmelCase_ :
def __init__( self , a ) -> Dict:
lowercase__ : Optional[Any] = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.' )
if len(a ) != 0:
lowercase__ : Optional[int] = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(a ) != cols:
raise error
for value in row:
if not isinstance(a , (int, float) ):
raise error
lowercase__ : Tuple = rows
else:
lowercase__ : int = []
def _UpperCAmelCase ( self ) -> list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def _UpperCAmelCase ( self ) -> int:
return len(self.rows )
@property
def _UpperCAmelCase ( self ) -> int:
return len(self.rows[0] )
@property
def _UpperCAmelCase ( self ) -> tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
def _UpperCAmelCase ( self ) -> bool:
return self.order[0] == self.order[1]
def _UpperCAmelCase ( self ) -> Matrix:
lowercase__ : Any = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(a )
def _UpperCAmelCase ( self ) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def _UpperCAmelCase ( self ) -> bool:
return bool(self.determinant() )
def _UpperCAmelCase ( self , a , a ) -> int:
lowercase__ : Optional[Any] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(a ).determinant()
def _UpperCAmelCase ( self , a , a ) -> int:
if (row + column) % 2 == 0:
return self.get_minor(a , a )
return -1 * self.get_minor(a , a )
def _UpperCAmelCase ( self ) -> Matrix:
return Matrix(
[
[self.get_minor(a , a ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def _UpperCAmelCase ( self ) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def _UpperCAmelCase ( self ) -> Matrix:
lowercase__ : Optional[int] = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(a )
def _UpperCAmelCase ( self ) -> Matrix:
lowercase__ : List[Any] = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse' )
return self.adjugate() * (1 / determinant)
def __repr__( self ) -> str:
return str(self.rows )
def __str__( self ) -> str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
'[' + '. '.join([str(a ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
def _UpperCAmelCase ( self , a , a = None ) -> None:
lowercase__ : str = TypeError('Row must be a list containing all ints and/or floats' )
if not isinstance(a , a ):
raise type_error
for value in row:
if not isinstance(a , (int, float) ):
raise type_error
if len(a ) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix' )
if position is None:
self.rows.append(a )
else:
lowercase__ : int = self.rows[0:position] + [row] + self.rows[position:]
def _UpperCAmelCase ( self , a , a = None ) -> None:
lowercase__ : Union[str, Any] = TypeError(
'Column must be a list containing all ints and/or floats' )
if not isinstance(a , a ):
raise type_error
for value in column:
if not isinstance(a , (int, float) ):
raise type_error
if len(a ) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix' )
if position is None:
lowercase__ : Dict = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
lowercase__ : str = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self , a ) -> bool:
if not isinstance(a , a ):
return NotImplemented
return self.rows == other.rows
def __ne__( self , a ) -> bool:
return not self == other
def __neg__( self ) -> Matrix:
return self * -1
def __add__( self , a ) -> Matrix:
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self , a ) -> Matrix:
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self , a ) -> Matrix:
if isinstance(a , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(a , a ):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second' )
return Matrix(
[
[Matrix.dot_product(a , a ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix' )
def __pow__( self , a ) -> Matrix:
if not isinstance(a , a ):
raise TypeError('A Matrix can only be raised to the power of an int' )
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power' )
lowercase__ : Tuple = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def _UpperCAmelCase ( cls , a , a ) -> int:
return sum(row[i] * column[i] for i in range(len(a ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 645
| 1
|
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = 10
lowercase__ : Union[str, Any] = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
lowercase__ : str = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(_lowerCAmelCase ) ),
} , features=_lowerCAmelCase , )
return dataset
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ : Any = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=_lowerCAmelCase )
return filename
# FILE_CONTENT + files
_UpperCamelCase : List[str] = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : str = tmp_path_factory.mktemp('data' ) / 'file.txt'
lowercase__ : str = FILE_CONTENT
with open(_lowerCAmelCase , 'w' ) as f:
f.write(_lowerCAmelCase )
return filename
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
import bza
lowercase__ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
lowercase__ : int = bytes(_lowerCAmelCase , 'utf-8' )
with bza.open(_lowerCAmelCase , 'wb' ) as f:
f.write(_lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
import gzip
lowercase__ : Any = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
lowercase__ : int = bytes(_lowerCAmelCase , 'utf-8' )
with gzip.open(_lowerCAmelCase , 'wb' ) as f:
f.write(_lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
lowercase__ : str = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
lowercase__ : Union[str, Any] = bytes(_lowerCAmelCase , 'utf-8' )
with lza.frame.open(_lowerCAmelCase , 'wb' ) as f:
f.write(_lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : int ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
lowercase__ : str = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(_lowerCAmelCase , 'w' ) as archive:
archive.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] ):
'''simple docstring'''
import tarfile
lowercase__ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(_lowerCAmelCase , 'w' ) as f:
f.add(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Any ):
'''simple docstring'''
import lzma
lowercase__ : List[str] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
lowercase__ : str = bytes(_lowerCAmelCase , 'utf-8' )
with lzma.open(_lowerCAmelCase , 'wb' ) as f:
f.write(_lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] ):
'''simple docstring'''
import zipfile
lowercase__ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(_lowerCAmelCase , 'w' ) as f:
f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowercase__ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
lowercase__ : Union[str, Any] = bytes(_lowerCAmelCase , 'utf-8' )
with zstd.open(_lowerCAmelCase , 'wb' ) as f:
f.write(_lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : str = tmp_path_factory.mktemp('data' ) / 'file.xml'
lowercase__ : int = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(_lowerCAmelCase , 'w' ) as f:
f.write(_lowerCAmelCase )
return filename
_UpperCamelCase : Tuple = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
_UpperCamelCase : List[str] = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
_UpperCamelCase : int = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
_UpperCamelCase : List[str] = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
_UpperCamelCase : Tuple = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def a_ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : Dict = datasets.Dataset.from_dict(_lowerCAmelCase )
lowercase__ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=_lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Any ):
'''simple docstring'''
lowercase__ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(_lowerCAmelCase ) ) as con:
lowercase__ : Union[str, Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
lowercase__ : str = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(_lowerCAmelCase , 'w' , newline='' ) as f:
lowercase__ : List[Any] = csv.DictWriter(_lowerCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(_lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(_lowerCAmelCase , 'w' , newline='' ) as f:
lowercase__ : List[str] = csv.DictWriter(_lowerCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(_lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
import bza
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(_lowerCAmelCase , 'rb' ) as f:
lowercase__ : Tuple = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(_lowerCAmelCase , 'wb' ) as f:
f.write(_lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(_lowerCAmelCase , 'w' ) as f:
f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
'''simple docstring'''
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(_lowerCAmelCase , 'w' ) as f:
f.write(_lowerCAmelCase , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(_lowerCAmelCase , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : str = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(_lowerCAmelCase , 'w' ) as f:
f.write(_lowerCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_lowerCAmelCase ) ) )
f.write(_lowerCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_lowerCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
lowercase__ : List[str] = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(_lowerCAmelCase , 'wb' ) as f:
lowercase__ : Optional[int] = pq.ParquetWriter(_lowerCAmelCase , schema=_lowerCAmelCase )
lowercase__ : int = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_lowerCAmelCase ) )] for k in DATA[0]} , schema=_lowerCAmelCase )
writer.write_table(_lowerCAmelCase )
writer.close()
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
lowercase__ : List[str] = {'data': DATA}
with open(_lowerCAmelCase , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
lowercase__ : List[str] = {'data': DATA_DICT_OF_LISTS}
with open(_lowerCAmelCase , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(_lowerCAmelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(_lowerCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : List[Any] ):
'''simple docstring'''
lowercase__ : str = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(_lowerCAmelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(_lowerCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Any ):
'''simple docstring'''
lowercase__ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(_lowerCAmelCase , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(_lowerCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
lowercase__ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(_lowerCAmelCase , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(_lowerCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Dict ):
'''simple docstring'''
import gzip
lowercase__ : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(_lowerCAmelCase , 'rb' ) as orig_file:
with gzip.open(_lowerCAmelCase , 'wb' ) as zipped_file:
zipped_file.writelines(_lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
'''simple docstring'''
import gzip
lowercase__ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(_lowerCAmelCase , 'rb' ) as orig_file:
with gzip.open(_lowerCAmelCase , 'wb' ) as zipped_file:
zipped_file.writelines(_lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
lowercase__ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(_lowerCAmelCase , 'w' ) as f:
f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(_lowerCAmelCase , 'w' ) as f:
f.write(_lowerCAmelCase , arcname=os.path.join('nested' , os.path.basename(_lowerCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
lowercase__ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(_lowerCAmelCase , 'w' ) as f:
f.write(_lowerCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_lowerCAmelCase ) ) )
f.write(_lowerCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_lowerCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(_lowerCAmelCase , 'w' ) as f:
f.add(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
f.add(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] ):
'''simple docstring'''
lowercase__ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(_lowerCAmelCase , 'w' ) as f:
f.add(_lowerCAmelCase , arcname=os.path.join('nested' , os.path.basename(_lowerCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
lowercase__ : Dict = ['0', '1', '2', '3']
lowercase__ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(_lowerCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Optional[Any] = ['0', '1', '2', '3']
lowercase__ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(_lowerCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = ['0', '1', '2', '3']
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(_lowerCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(_lowerCAmelCase , 'w' ) as f:
f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Any = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(_lowerCAmelCase , 'w' ) as f:
f.write(_lowerCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_lowerCAmelCase ) ) )
f.write(_lowerCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_lowerCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(_lowerCAmelCase , 'w' ) as f:
f.write(_lowerCAmelCase , arcname=os.path.basename('unsupported.ext' ) )
f.write(_lowerCAmelCase , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : Optional[int] = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
lowercase__ : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(_lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def a_ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(_lowerCAmelCase , 'w' ) as f:
f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Any = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 645
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
lowercase__ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(a )
from datasets import load_dataset
lowercase__ : str = load_dataset('nielsr/rvlcdip-demo' )
lowercase__ : Tuple = dataset['train'][0]['image'].convert('RGB' )
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : List[str] = model(**a )
lowercase__ : List[Any] = outputs.logits
lowercase__ : Union[str, Any] = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , a )
lowercase__ : Tuple = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
_UpperCamelCase : Union[str, Any] = open # noqa: we just need to have a builtin inside this module to test it properly
| 645
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase_ :
@staticmethod
def _UpperCAmelCase ( *a , **a ) -> int:
pass
def a_ ( _lowerCAmelCase : Image ):
'''simple docstring'''
lowercase__ : List[str] = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a )
import datasets
lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowercase__ : List[Any] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = 'Intel/dpt-large'
lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a )
lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowercase__ : Optional[Any] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 645
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : str = KandinskyImgaImgPipeline
lowerCamelCase__ : List[str] = ["prompt", "image_embeds", "negative_image_embeds", "image"]
lowerCamelCase__ : Optional[int] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
lowerCamelCase__ : Union[str, Any] = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowerCamelCase__ : Optional[int] = False
@property
def _UpperCAmelCase ( self ) -> List[str]:
return 3_2
@property
def _UpperCAmelCase ( self ) -> List[Any]:
return 3_2
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
return self.time_input_dim
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def _UpperCAmelCase ( self ) -> int:
return 1_0_0
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
lowercase__ : Optional[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
lowercase__ : Tuple = MultilingualCLIP(a )
lowercase__ : List[str] = text_encoder.eval()
return text_encoder
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
lowercase__ : List[str] = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowercase__ : Any = UNetaDConditionModel(**a )
return model
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
lowercase__ : Any = VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[Any] = self.dummy_text_encoder
lowercase__ : int = self.dummy_tokenizer
lowercase__ : Tuple = self.dummy_unet
lowercase__ : Tuple = self.dummy_movq
lowercase__ : Union[str, Any] = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowercase__ : Dict = DDIMScheduler(**a )
lowercase__ : int = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def _UpperCAmelCase ( self , a , a=0 ) -> Tuple:
lowercase__ : Any = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a ) ).to(a )
lowercase__ : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a )
# create init_image
lowercase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(a ) ).to(a )
lowercase__ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ : List[Any] = Image.fromarray(np.uinta(a ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
if str(a ).startswith('mps' ):
lowercase__ : Tuple = torch.manual_seed(a )
else:
lowercase__ : Tuple = torch.Generator(device=a ).manual_seed(a )
lowercase__ : str = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = 'cpu'
lowercase__ : List[str] = self.get_dummy_components()
lowercase__ : List[str] = self.pipeline_class(**a )
lowercase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ : Optional[int] = pipe(**self.get_dummy_inputs(a ) )
lowercase__ : Tuple = output.images
lowercase__ : Union[str, Any] = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
lowercase__ : Dict = image[0, -3:, -3:, -1]
lowercase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowercase__ : Tuple = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
lowercase__ : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowercase__ : str = 'A red cartoon frog, 4k'
lowercase__ : Dict = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(a )
lowercase__ : int = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
lowercase__ : Union[str, Any] = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowercase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowercase__ , lowercase__ : str = pipe_prior(
a , generator=a , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
lowercase__ : Optional[Any] = pipeline(
a , image=a , image_embeds=a , negative_image_embeds=a , generator=a , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='np' , )
lowercase__ : Union[str, Any] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(a , a )
| 645
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase_ ( _a):
def __init__( self ) -> Any:
lowercase__ : Tuple = []
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_init_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]:
self.events.append('on_train_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_train_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_epoch_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[Any]:
self.events.append('on_epoch_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_step_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> str:
self.events.append('on_step_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_evaluate' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple:
self.events.append('on_predict' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Union[str, Any]:
self.events.append('on_save' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_log' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_prediction_step' )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> str:
lowercase__ : str = tempfile.mkdtemp()
def _UpperCAmelCase ( self ) -> Dict:
shutil.rmtree(self.output_dir )
def _UpperCAmelCase ( self , a=0 , a=0 , a=6_4 , a=6_4 , a=None , a=False , **a ) -> int:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
lowercase__ : str = RegressionDataset(length=a )
lowercase__ : Any = RegressionDataset(length=a )
lowercase__ : Optional[Any] = RegressionModelConfig(a=a , b=a )
lowercase__ : Union[str, Any] = RegressionPreTrainedModel(a )
lowercase__ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=a , report_to=[] , **a )
return Trainer(
a , a , train_dataset=a , eval_dataset=a , callbacks=a , )
def _UpperCAmelCase ( self , a , a ) -> Union[str, Any]:
self.assertEqual(len(a ) , len(a ) )
# Order doesn't matter
lowercase__ : Optional[int] = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
lowercase__ : Tuple = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
for cba, cba in zip(a , a ):
if isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(a , a )
elif isinstance(a , a ) and not isinstance(a , a ):
self.assertEqual(a , cba.__class__ )
elif not isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(cba.__class__ , a )
else:
self.assertEqual(a , a )
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
lowercase__ : Dict = ['on_init_end', 'on_train_begin']
lowercase__ : List[Any] = 0
lowercase__ : Optional[int] = len(trainer.get_eval_dataloader() )
lowercase__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(a ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : int = self.get_trainer()
lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# Callbacks passed at init are added to the default callbacks
lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a )
lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ : List[str] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Optional[Any] = self.get_trainer()
lowercase__ : List[Any] = trainer.pop_callback(a )
self.assertEqual(cb.__class__ , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# We can also add, pop, or remove by instance
lowercase__ : int = self.get_trainer()
lowercase__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Tuple = self.get_trainer()
lowercase__ : Dict = trainer.callback_handler.callbacks[0]
lowercase__ : Union[str, Any] = trainer.pop_callback(a )
self.assertEqual(a , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Tuple:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=a )
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# Independent log/save/eval
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
lowercase__ : str = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(a ) in warn_mock.call_args[0][0]
| 645
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCamelCase : Optional[Any] = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : str = ["DPTFeatureExtractor"]
_UpperCamelCase : Union[str, Any] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Union[str, Any] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 645
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCamelCase : str = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 645
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
def a_ ( _lowerCAmelCase : list[Any] ):
'''simple docstring'''
create_state_space_tree(_lowerCAmelCase , [] , 0 )
def a_ ( _lowerCAmelCase : list[Any] , _lowerCAmelCase : list[Any] , _lowerCAmelCase : int ):
'''simple docstring'''
if index == len(_lowerCAmelCase ):
print(_lowerCAmelCase )
return
create_state_space_tree(_lowerCAmelCase , _lowerCAmelCase , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(_lowerCAmelCase , _lowerCAmelCase , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
_UpperCamelCase : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self , a ) -> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
lowercase__ : str = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = 'sshleifer/tiny-gpt2'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : List[Any] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : Tuple = TensorFlowBenchmark(a , [config] )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : List[str] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : List[str] = TensorFlowBenchmark(a , [config] )
lowercase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[int] = AutoConfig.from_pretrained(a )
lowercase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a , [config] )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random'
lowercase__ : Any = AutoConfig.from_pretrained(a )
lowercase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : int = TensorFlowBenchmark(a , configs=[config] )
lowercase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , )
lowercase__ : Any = TensorFlowBenchmark(a )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , )
lowercase__ : Union[str, Any] = TensorFlowBenchmark(a )
benchmark.run()
self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(a ):
self.assertTrue(hasattr(a , 'sequential' ) )
self.assertTrue(hasattr(a , 'cumulative' ) )
self.assertTrue(hasattr(a , 'current' ) )
self.assertTrue(hasattr(a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , )
lowercase__ : Optional[int] = TensorFlowBenchmark(a )
lowercase__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
| 645
| 1
|
"""simple docstring"""
from random import randint, random
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : int = 5 , ):
'''simple docstring'''
lowercase__ : Dict = [[-1] * number_of_cells] # Create a highway without any car
lowercase__ : int = 0
lowercase__ : Any = max(_lowerCAmelCase , 0 )
while i < number_of_cells:
lowercase__ : Optional[int] = (
randint(0 , _lowerCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def a_ ( _lowerCAmelCase : list , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Optional[Any] = 0
lowercase__ : Tuple = highway_now[car_index + 1 :]
for cell in range(len(_lowerCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(_lowerCAmelCase , -1 )
def a_ ( _lowerCAmelCase : list , _lowerCAmelCase : float , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Optional[int] = len(_lowerCAmelCase )
# Beforce calculations, the highway is empty
lowercase__ : Optional[Any] = [-1] * number_of_cells
for car_index in range(_lowerCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
lowercase__ : Tuple = min(highway_now[car_index] + 1 , _lowerCAmelCase )
# Number of empty cell before the next car
lowercase__ : Union[str, Any] = get_distance(_lowerCAmelCase , _lowerCAmelCase ) - 1
# We can't have the car causing an accident
lowercase__ : Optional[int] = min(next_highway[car_index] , _lowerCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
lowercase__ : List[str] = max(next_highway[car_index] - 1 , 0 )
return next_highway
def a_ ( _lowerCAmelCase : list , _lowerCAmelCase : int , _lowerCAmelCase : float , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Optional[int] = len(highway[0] )
for i in range(_lowerCAmelCase ):
lowercase__ : Optional[int] = update(highway[i] , _lowerCAmelCase , _lowerCAmelCase )
lowercase__ : Optional[int] = [-1] * number_of_cells
for car_index in range(_lowerCAmelCase ):
lowercase__ : Optional[int] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
lowercase__ : List[Any] = (car_index + speed) % number_of_cells
# Commit the change of position
lowercase__ : List[str] = speed
highway.append(_lowerCAmelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : List[str] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : str = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = DistilBertModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(config=a )
lowercase__ : int = self._prepare_for_class(a , a )
lowercase__ : Tuple = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) )
lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ : Optional[Any] = model(a , attention_mask=a )[0]
lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a )
lowercase__ : List[Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_UpperCamelCase : List[Any] = "src/transformers"
_UpperCamelCase : List[str] = "docs/source/en"
_UpperCamelCase : List[Any] = "."
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
'''simple docstring'''
with open(_lowerCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowercase__ : List[Any] = f.readlines()
# Find the start prompt.
lowercase__ : Dict = 0
while not lines[start_index].startswith(_lowerCAmelCase ):
start_index += 1
start_index += 1
lowercase__ : Any = start_index
while not lines[end_index].startswith(_lowerCAmelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_UpperCamelCase : List[str] = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_UpperCamelCase : str = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_UpperCamelCase : Tuple = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
_UpperCamelCase : Union[str, Any] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
_UpperCamelCase : Union[str, Any] = direct_transformers_import(TRANSFORMERS_PATH)
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , _lowerCAmelCase )
return [m.group(0 ) for m in matches]
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Dict = 2 if text == '✅' or text == '❌' else len(_lowerCAmelCase )
lowercase__ : Optional[Any] = (width - text_length) // 2
lowercase__ : int = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def a_ ( ):
'''simple docstring'''
lowercase__ : Tuple = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowercase__ : Union[str, Any] = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
lowercase__ : int = {name: config.replace('Config' , '' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
lowercase__ : List[str] = collections.defaultdict(_lowerCAmelCase )
lowercase__ : Optional[Any] = collections.defaultdict(_lowerCAmelCase )
lowercase__ : Dict = collections.defaultdict(_lowerCAmelCase )
lowercase__ : Optional[Any] = collections.defaultdict(_lowerCAmelCase )
lowercase__ : Optional[Any] = collections.defaultdict(_lowerCAmelCase )
# Let's lookup through all transformers object (once).
for attr_name in dir(_lowerCAmelCase ):
lowercase__ : List[Any] = None
if attr_name.endswith('Tokenizer' ):
lowercase__ : List[Any] = slow_tokenizers
lowercase__ : Any = attr_name[:-9]
elif attr_name.endswith('TokenizerFast' ):
lowercase__ : Any = fast_tokenizers
lowercase__ : Tuple = attr_name[:-13]
elif _re_tf_models.match(_lowerCAmelCase ) is not None:
lowercase__ : Optional[int] = tf_models
lowercase__ : str = _re_tf_models.match(_lowerCAmelCase ).groups()[0]
elif _re_flax_models.match(_lowerCAmelCase ) is not None:
lowercase__ : Union[str, Any] = flax_models
lowercase__ : Any = _re_flax_models.match(_lowerCAmelCase ).groups()[0]
elif _re_pt_models.match(_lowerCAmelCase ) is not None:
lowercase__ : Union[str, Any] = pt_models
lowercase__ : Dict = _re_pt_models.match(_lowerCAmelCase ).groups()[0]
if lookup_dict is not None:
while len(_lowerCAmelCase ) > 0:
if attr_name in model_name_to_prefix.values():
lowercase__ : Union[str, Any] = True
break
# Try again after removing the last word in the name
lowercase__ : str = ''.join(camel_case_split(_lowerCAmelCase )[:-1] )
# Let's build that table!
lowercase__ : str = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
lowercase__ : str = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
lowercase__ : Dict = [len(_lowerCAmelCase ) + 2 for c in columns]
lowercase__ : List[str] = max([len(_lowerCAmelCase ) for name in model_names] ) + 2
# Build the table per se
lowercase__ : Optional[int] = '|' + '|'.join([_center_text(_lowerCAmelCase , _lowerCAmelCase ) for c, w in zip(_lowerCAmelCase , _lowerCAmelCase )] ) + '|\n'
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths] ) + "|\n"
lowercase__ : Tuple = {True: '✅', False: '❌'}
for name in model_names:
lowercase__ : Tuple = model_name_to_prefix[name]
lowercase__ : Optional[int] = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(_lowerCAmelCase , _lowerCAmelCase ) for l, w in zip(_lowerCAmelCase , _lowerCAmelCase )] ) + "|\n"
return table
def a_ ( _lowerCAmelCase : Dict=False ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = _find_text_in_file(
filename=os.path.join(_lowerCAmelCase , 'index.md' ) , start_prompt='<!--This table is updated automatically from the auto modules' , end_prompt='<!-- End table-->' , )
lowercase__ : List[str] = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(_lowerCAmelCase , 'index.md' ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.' )
if __name__ == "__main__":
_UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_UpperCamelCase : str = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 645
|
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
| 1
|
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
_UpperCamelCase : Optional[int] = logging.get_logger(__name__)
_UpperCamelCase : List[str] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_UpperCamelCase : int = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
_UpperCamelCase : List[str] = {
"facebook/blenderbot_small-90M": 5_12,
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Any = VOCAB_FILES_NAMES
lowerCamelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : List[Any] = BlenderbotSmallTokenizer
def __init__( self , a=None , a=None , a="<|endoftext|>" , a="<|endoftext|>" , a="<|endoftext|>" , a=False , a=True , **a , ) -> str:
super().__init__(
ByteLevelBPETokenizer(
vocab=a , merges=a , add_prefix_space=a , trim_offsets=a , ) , bos_token=a , eos_token=a , unk_token=a , **a , )
lowercase__ : Any = add_prefix_space
def _UpperCAmelCase ( self , a , a=None ) -> Optional[int]:
lowercase__ : List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCAmelCase ( self , a , a = None ) -> List[int]:
lowercase__ : List[Any] = [self.sep_token_id]
lowercase__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 645
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any:
lowercase__ : List[str] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[int] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : str = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Any = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Tuple = n_targets
lowercase__ : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ : int = []
for i in range(self.batch_size ):
lowercase__ : Optional[Any] = {}
lowercase__ : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=a )
lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a )
labels.append(a )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _UpperCAmelCase ( self , a , a , a ) -> int:
lowercase__ : List[str] = YolosModel(config=a )
model.to(a )
model.eval()
lowercase__ : List[Any] = model(a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = YolosForObjectDetection(a )
model.to(a )
model.eval()
lowercase__ : Dict = model(pixel_values=a )
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ : str = model(pixel_values=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__ : List[str] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Union[str, Any] = False
def _UpperCAmelCase ( self , a , a , a=False ) -> Dict:
lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase__ : Optional[Any] = []
for i in range(self.model_tester.batch_size ):
lowercase__ : Dict = {}
lowercase__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=a , dtype=torch.long )
lowercase__ : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=a , dtype=torch.float )
labels.append(a )
lowercase__ : Union[str, Any] = labels
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = YolosModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
lowercase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = True
# in YOLOS, the seq_len is different
lowercase__ : Tuple = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : str = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : Optional[int] = True
lowercase__ : List[Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[str] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : Dict = len(a )
# Check attention is always last and order is fine
lowercase__ : Any = True
lowercase__ : int = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowercase__ : Tuple = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(a , a , a ):
lowercase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(a , a ) )
lowercase__ : int = outputs.hidden_states
lowercase__ : Any = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(a ) , a )
# YOLOS has a different seq_length
lowercase__ : Optional[int] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[Any] = True
check_hidden_states_output(a , a , a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*a )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = YolosModel.from_pretrained(a )
self.assertIsNotNone(a )
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a )
lowercase__ : Tuple = self.default_image_processor
lowercase__ : Optional[int] = prepare_img()
lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : int = model(inputs.pixel_values )
# verify outputs
lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , )
lowercase__ : List[str] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) )
# verify postprocessing
lowercase__ : Optional[Any] = image_processor.post_process_object_detection(
a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a )
lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7]
lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , a )
self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
| 645
| 1
|
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def a_ ( ):
'''simple docstring'''
print('Making key files...' )
make_key_files('rsa' , 1024 )
print('Key files generation successful.' )
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
print('Generating prime p...' )
lowercase__ : Any = rabinMiller.generate_large_prime(_lowerCAmelCase )
print('Generating prime q...' )
lowercase__ : str = rabinMiller.generate_large_prime(_lowerCAmelCase )
lowercase__ : Any = p * q
print('Generating e that is relatively prime to (p - 1) * (q - 1)...' )
while True:
lowercase__ : Dict = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(_lowerCAmelCase , (p - 1) * (q - 1) ) == 1:
break
print('Calculating d that is mod inverse of e...' )
lowercase__ : Tuple = cryptoMath.find_mod_inverse(_lowerCAmelCase , (p - 1) * (q - 1) )
lowercase__ : Any = (n, e)
lowercase__ : List[str] = (n, d)
return (public_key, private_key)
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : int ):
'''simple docstring'''
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print('\nWARNING:' )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'Use a different name or delete these files and re-run this program.' )
sys.exit()
lowercase__ , lowercase__ : str = generate_key(_lowerCAmelCase )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" , 'w' ) as out_file:
out_file.write(f"""{key_size},{public_key[0]},{public_key[1]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" , 'w' ) as out_file:
out_file.write(f"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
| 645
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_UpperCamelCase : int = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
def __init__( self , a=False , a=False , a=6.0 , a=None , a=False , a=False , a=None , a="fp4" , a=False , **a , ) -> Tuple:
lowercase__ : str = load_in_abit
lowercase__ : str = load_in_abit
lowercase__ : List[str] = llm_inta_threshold
lowercase__ : Dict = llm_inta_skip_modules
lowercase__ : Tuple = llm_inta_enable_fpaa_cpu_offload
lowercase__ : Any = llm_inta_has_fpaa_weight
lowercase__ : Any = bnb_abit_quant_type
lowercase__ : Dict = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
lowercase__ : Dict = torch.floataa
elif isinstance(a , a ):
lowercase__ : Any = getattr(a , a )
elif isinstance(a , torch.dtype ):
lowercase__ : Any = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def _UpperCAmelCase ( self ) -> str:
if not isinstance(self.llm_inta_threshold , a ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , a ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , a ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , a ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , a ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , a ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def _UpperCAmelCase ( self ) -> Tuple:
return self.load_in_abit or self.load_in_abit
def _UpperCAmelCase ( self ) -> List[str]:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _UpperCAmelCase ( cls , a , a , **a ) -> Optional[Any]:
lowercase__ : List[Any] = cls(**a )
lowercase__ : Union[str, Any] = []
for key, value in kwargs.items():
if hasattr(a , a ):
setattr(a , a , a )
to_remove.append(a )
for key in to_remove:
kwargs.pop(a , a )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _UpperCAmelCase ( self , a ) -> Dict:
with open(a , 'w' , encoding='utf-8' ) as writer:
lowercase__ : Any = self.to_dict()
lowercase__ : str = json.dumps(a , indent=2 , sort_keys=a ) + '\n'
writer.write(a )
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self ) -> Dict:
return f"""{self.__class__.__name__} {self.to_json_string()}"""
def _UpperCAmelCase ( self , a = True ) -> str:
if use_diff is True:
lowercase__ : List[Any] = self.to_diff_dict()
else:
lowercase__ : List[str] = self.to_dict()
return json.dumps(a , indent=2 , sort_keys=a ) + "\n"
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Tuple = self.to_dict()
# get the default config dict
lowercase__ : Optional[Any] = BitsAndBytesConfig().to_dict()
lowercase__ : int = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
lowercase__ : Optional[int] = value
return serializable_config_dict
| 645
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
_UpperCamelCase : Any = TypeVar("T")
class UpperCAmelCase_ ( Generic[T]):
def __init__( self , a ) -> None:
lowercase__ : List[Any] = data
lowercase__ : Optional[Any] = self
lowercase__ : Optional[Any] = 0
class UpperCAmelCase_ ( Generic[T]):
def __init__( self ) -> None:
# map from node name to the node object
lowercase__ : dict[T, DisjointSetTreeNode[T]] = {}
def _UpperCAmelCase ( self , a ) -> None:
# create a new set with x as its member
lowercase__ : str = DisjointSetTreeNode(a )
def _UpperCAmelCase ( self , a ) -> DisjointSetTreeNode[T]:
# find the set x belongs to (with path-compression)
lowercase__ : Optional[Any] = self.map[data]
if elem_ref != elem_ref.parent:
lowercase__ : Union[str, Any] = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def _UpperCAmelCase ( self , a , a ) -> None:
# helper function for union operation
if nodea.rank > nodea.rank:
lowercase__ : Optional[int] = nodea
else:
lowercase__ : Optional[Any] = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def _UpperCAmelCase ( self , a , a ) -> None:
# merge 2 disjoint sets
self.link(self.find_set(a ) , self.find_set(a ) )
class UpperCAmelCase_ ( Generic[T]):
def __init__( self ) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
lowercase__ : dict[T, dict[T, int]] = {}
def _UpperCAmelCase ( self , a ) -> None:
# add a node ONLY if its not present in the graph
if node not in self.connections:
lowercase__ : int = {}
def _UpperCAmelCase ( self , a , a , a ) -> None:
# add an edge with the given weight
self.add_node(a )
self.add_node(a )
lowercase__ : Any = weight
lowercase__ : Union[str, Any] = weight
def _UpperCAmelCase ( self ) -> GraphUndirectedWeighted[T]:
lowercase__ : List[str] = []
lowercase__ : Dict = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda a : x[2] )
# creating the disjoint set
lowercase__ : str = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(a )
# MST generation
lowercase__ : int = 0
lowercase__ : Tuple = 0
lowercase__ : Any = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
lowercase__ , lowercase__ , lowercase__ : Tuple = edges[index]
index += 1
lowercase__ : int = disjoint_set.find_set(a )
lowercase__ : Any = disjoint_set.find_set(a )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(a , a , a )
disjoint_set.union(a , a )
return graph
| 645
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : int = 16
_UpperCamelCase : Union[str, Any] = 32
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
return int(x / 2**20 )
class UpperCAmelCase_ :
def __enter__( self ) -> Union[str, Any]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase__ : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self , *a ) -> Any:
gc.collect()
torch.cuda.empty_cache()
lowercase__ : Optional[Any] = torch.cuda.memory_allocated()
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
lowercase__ : List[Any] = bamb(self.end - self.begin )
lowercase__ : List[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ):
'''simple docstring'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
lowercase__ : Union[str, Any] = load_dataset(
'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} )
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Union[str, Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCAmelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : Optional[int] = config['lr']
lowercase__ : Optional[Any] = int(config['num_epochs'] )
lowercase__ : Optional[Any] = int(config['seed'] )
lowercase__ : int = int(config['batch_size'] )
lowercase__ : Union[str, Any] = args.model_name_or_path
set_seed(_lowerCAmelCase )
lowercase__ , lowercase__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase )
# Instantiate optimizer
lowercase__ : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowercase__ : List[Any] = 1
lowercase__ : List[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , )
else:
lowercase__ : Tuple = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase__ : Optional[int] = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ : Tuple = 0
# Now we train the model
lowercase__ : Optional[Any] = {}
for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
lowercase__ : List[Any] = model(**_lowerCAmelCase )
lowercase__ : Dict = outputs.loss
lowercase__ : int = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowercase__ : int = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , )
parser.add_argument(
'--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=_lowerCAmelCase , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=_lowerCAmelCase , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=_lowerCAmelCase , default=1 , help='Number of train epochs.' , )
lowercase__ : Any = parser.parse_args()
lowercase__ : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 645
| 1
|
"""simple docstring"""
import math
def a_ ( _lowerCAmelCase : int = 100 ):
'''simple docstring'''
lowercase__ : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) )
lowercase__ : str = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
| 645
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Any = [0] * len(_lowerCAmelCase )
for i in range(1 , len(_lowerCAmelCase ) ):
# use last results for better performance - dynamic programming
lowercase__ : List[str] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowercase__ : Dict = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowercase__ : Union[str, Any] = j
return prefix_result
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
return max(prefix_function(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : int = logging.get_logger(__name__)
_UpperCamelCase : Union[str, Any] = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Any = "vivit"
def __init__( self , a=2_2_4 , a=3_2 , a=[2, 1_6, 1_6] , a=3 , a=7_6_8 , a=1_2 , a=1_2 , a=3_0_7_2 , a="gelu_fast" , a=0.0 , a=0.0 , a=0.02 , a=1e-06 , a=True , **a , ) -> List[Any]:
lowercase__ : int = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : int = num_attention_heads
lowercase__ : List[Any] = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : int = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : int = initializer_range
lowercase__ : Optional[int] = layer_norm_eps
lowercase__ : Any = image_size
lowercase__ : List[Any] = num_frames
lowercase__ : Optional[int] = tubelet_size
lowercase__ : int = num_channels
lowercase__ : Optional[int] = qkv_bias
super().__init__(**a )
| 645
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]:
lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0}
lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
lowercase__ : Optional[int] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : str = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = min_resolution
lowercase__ : int = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : str = do_center_crop
lowercase__ : List[Any] = crop_size
lowercase__ : Union[str, Any] = do_flip_channel_order
def _UpperCAmelCase ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Tuple = MobileViTImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_center_crop' ) )
self.assertTrue(hasattr(a , 'center_crop' ) )
self.assertTrue(hasattr(a , 'do_flip_channel_order' ) )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processing
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 645
| 1
|
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class UpperCAmelCase_ ( _a , _a):
@register_to_config
def __init__( self , a = 1_2_8 , a = 2_5_6 , a = 2_000.0 , a = 7_6_8 , a = 1_2 , a = 1_2 , a = 6_4 , a = 2_0_4_8 , a = 0.1 , ) -> Optional[int]:
super().__init__()
lowercase__ : Any = nn.Sequential(
nn.Linear(a , d_model * 4 , bias=a ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=a ) , nn.SiLU() , )
lowercase__ : Union[str, Any] = nn.Embedding(a , a )
lowercase__ : Any = False
lowercase__ : List[str] = nn.Linear(a , a , bias=a )
lowercase__ : Dict = nn.Dropout(p=a )
lowercase__ : List[Any] = nn.ModuleList()
for lyr_num in range(a ):
# FiLM conditional T5 decoder
lowercase__ : Tuple = DecoderLayer(d_model=a , d_kv=a , num_heads=a , d_ff=a , dropout_rate=a )
self.decoders.append(a )
lowercase__ : str = TaLayerNorm(a )
lowercase__ : List[Any] = nn.Dropout(p=a )
lowercase__ : List[str] = nn.Linear(a , a , bias=a )
def _UpperCAmelCase ( self , a , a ) -> int:
lowercase__ : Optional[int] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[Any]:
lowercase__ , lowercase__ , lowercase__ : List[str] = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
lowercase__ : Union[str, Any] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
lowercase__ : Tuple = self.conditioning_emb(a ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
lowercase__ : Any = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
lowercase__ : int = torch.broadcast_to(
torch.arange(a , device=decoder_input_tokens.device ) , (batch, seq_length) , )
lowercase__ : Union[str, Any] = self.position_encoding(a )
lowercase__ : Union[str, Any] = self.continuous_inputs_projection(a )
inputs += position_encodings
lowercase__ : Tuple = self.dropout(a )
# decoder: No padding present.
lowercase__ : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
lowercase__ : Any = [(x, self.encoder_decoder_mask(a , a )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
lowercase__ : Tuple = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
lowercase__ : Any = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
lowercase__ : List[str] = lyr(
a , conditioning_emb=a , encoder_hidden_states=a , encoder_attention_mask=a , )[0]
lowercase__ : Any = self.decoder_norm(a )
lowercase__ : Dict = self.post_dropout(a )
lowercase__ : Tuple = self.spec_out(a )
return spec_out
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a , a , a , a , a=1e-6 ) -> Dict:
super().__init__()
lowercase__ : List[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=a , d_kv=a , num_heads=a , dropout_rate=a ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=a , d_kv=a , num_heads=a , dropout_rate=a , layer_norm_epsilon=a , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=a , d_ff=a , dropout_rate=a , layer_norm_epsilon=a ) )
def _UpperCAmelCase ( self , a , a=None , a=None , a=None , a=None , a=None , ) -> List[Any]:
lowercase__ : List[Any] = self.layer[0](
a , conditioning_emb=a , attention_mask=a , )
if encoder_hidden_states is not None:
lowercase__ : Optional[Any] = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
lowercase__ : List[str] = self.layer[1](
a , key_value_states=a , attention_mask=a , )
# Apply Film Conditional Feed Forward layer
lowercase__ : Union[str, Any] = self.layer[-1](a , a )
return (hidden_states,)
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a , a , a ) -> Union[str, Any]:
super().__init__()
lowercase__ : List[Any] = TaLayerNorm(a )
lowercase__ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=a )
lowercase__ : List[Any] = Attention(query_dim=a , heads=a , dim_head=a , out_bias=a , scale_qk=a )
lowercase__ : List[Any] = nn.Dropout(a )
def _UpperCAmelCase ( self , a , a=None , a=None , ) -> List[Any]:
# pre_self_attention_layer_norm
lowercase__ : Optional[Any] = self.layer_norm(a )
if conditioning_emb is not None:
lowercase__ : List[Any] = self.FiLMLayer(a , a )
# Self-attention block
lowercase__ : Dict = self.attention(a )
lowercase__ : str = hidden_states + self.dropout(a )
return hidden_states
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a , a , a , a ) -> Union[str, Any]:
super().__init__()
lowercase__ : Tuple = Attention(query_dim=a , heads=a , dim_head=a , out_bias=a , scale_qk=a )
lowercase__ : Union[str, Any] = TaLayerNorm(a , eps=a )
lowercase__ : str = nn.Dropout(a )
def _UpperCAmelCase ( self , a , a=None , a=None , ) -> str:
lowercase__ : Dict = self.layer_norm(a )
lowercase__ : int = self.attention(
a , encoder_hidden_states=a , attention_mask=attention_mask.squeeze(1 ) , )
lowercase__ : Optional[int] = hidden_states + self.dropout(a )
return layer_output
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a , a , a ) -> Tuple:
super().__init__()
lowercase__ : Optional[Any] = TaDenseGatedActDense(d_model=a , d_ff=a , dropout_rate=a )
lowercase__ : Optional[int] = TaFiLMLayer(in_features=d_model * 4 , out_features=a )
lowercase__ : int = TaLayerNorm(a , eps=a )
lowercase__ : Dict = nn.Dropout(a )
def _UpperCAmelCase ( self , a , a=None ) -> Optional[int]:
lowercase__ : Any = self.layer_norm(a )
if conditioning_emb is not None:
lowercase__ : Dict = self.film(a , a )
lowercase__ : List[Any] = self.DenseReluDense(a )
lowercase__ : Tuple = hidden_states + self.dropout(a )
return hidden_states
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a , a ) -> List[str]:
super().__init__()
lowercase__ : Optional[int] = nn.Linear(a , a , bias=a )
lowercase__ : str = nn.Linear(a , a , bias=a )
lowercase__ : List[Any] = nn.Linear(a , a , bias=a )
lowercase__ : Optional[Any] = nn.Dropout(a )
lowercase__ : Optional[int] = NewGELUActivation()
def _UpperCAmelCase ( self , a ) -> Optional[int]:
lowercase__ : int = self.act(self.wi_a(a ) )
lowercase__ : str = self.wi_a(a )
lowercase__ : Union[str, Any] = hidden_gelu * hidden_linear
lowercase__ : Dict = self.dropout(a )
lowercase__ : Optional[int] = self.wo(a )
return hidden_states
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a=1e-6 ) -> Tuple:
super().__init__()
lowercase__ : Optional[Any] = nn.Parameter(torch.ones(a ) )
lowercase__ : Union[str, Any] = eps
def _UpperCAmelCase ( self , a ) -> int:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
lowercase__ : Dict = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=a )
lowercase__ : Optional[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
lowercase__ : Union[str, Any] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class UpperCAmelCase_ ( nn.Module):
def _UpperCAmelCase ( self , a ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(a , 3.0 )) ))
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a ) -> Union[str, Any]:
super().__init__()
lowercase__ : Optional[Any] = nn.Linear(a , out_features * 2 , bias=a )
def _UpperCAmelCase ( self , a , a ) -> str:
lowercase__ : Tuple = self.scale_bias(a )
lowercase__ , lowercase__ : Any = torch.chunk(a , 2 , -1 )
lowercase__ : Dict = x * (1 + scale) + shift
return x
| 645
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict:
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : int = is_training
lowercase__ : str = use_attention_mask
lowercase__ : Dict = use_token_type_ids
lowercase__ : Optional[int] = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : int = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Optional[int] = num_choices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_attention_mask:
lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[str] = None
if self.use_token_type_ids:
lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Any = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_class_name in self.all_model_classes:
lowercase__ : str = model_class_name.from_pretrained('albert-base-v2' )
lowercase__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' )
lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ : Any = model(a , attention_mask=a )[0]
lowercase__ : Tuple = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , a )
lowercase__ : Optional[Any] = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
import os
def a_ ( ):
'''simple docstring'''
with open(os.path.dirname(_lowerCAmelCase ) + '/p022_names.txt' ) as file:
lowercase__ : Union[str, Any] = str(file.readlines()[0] )
lowercase__ : Tuple = names.replace('"' , '' ).split(',' )
names.sort()
lowercase__ : Tuple = 0
lowercase__ : Optional[Any] = 0
for i, name in enumerate(_lowerCAmelCase ):
for letter in name:
name_score += ord(_lowerCAmelCase ) - 64
total_score += (i + 1) * name_score
lowercase__ : Tuple = 0
return total_score
if __name__ == "__main__":
print(solution())
| 645
|
"""simple docstring"""
from collections.abc import Sequence
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
lowercase__ : int = 0.0
for coeff in reversed(_lowerCAmelCase ):
lowercase__ : List[Any] = result * x + coeff
return result
if __name__ == "__main__":
_UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0)
_UpperCamelCase : Dict = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 645
| 1
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_UpperCamelCase : str = logging.get_logger(__name__)
# TODO: upload to AWS
_UpperCamelCase : Tuple = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : int = "retribert"
def __init__( self , a=3_0_5_2_2 , a=7_6_8 , a=8 , a=1_2 , a=3_0_7_2 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=2 , a=0.02 , a=1e-12 , a=True , a=1_2_8 , a=0 , **a , ) -> Optional[int]:
super().__init__(pad_token_id=a , **a )
lowercase__ : List[Any] = vocab_size
lowercase__ : List[Any] = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : List[str] = hidden_act
lowercase__ : List[Any] = intermediate_size
lowercase__ : Any = hidden_dropout_prob
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : Dict = max_position_embeddings
lowercase__ : Any = type_vocab_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Union[str, Any] = layer_norm_eps
lowercase__ : str = share_encoders
lowercase__ : Optional[Any] = projection_dim
| 645
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
_UpperCamelCase : Any = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def a_ ( _lowerCAmelCase : Optional[Any]=True ):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a))
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[Any] = None
def _UpperCAmelCase ( self , a , a ) -> List[Any]:
with TemporaryDirectory() as tmp_dir:
lowercase__ : List[str] = dataset_module_factory(a , cache_dir=a )
lowercase__ : List[Any] = import_main_class(dataset_module.module_path , dataset=a )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
lowercase__ : Union[str, Any] = cached_path(a , cache_dir=a )
self.assertTrue(os.path.exists(a ) )
@pytest.mark.integration
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
lowercase__ : int = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : Optional[int] = import_main_class(dataset_module.module_path )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
lowercase__ : Optional[int] = None
builder_instance.download_and_prepare()
lowercase__ : Optional[int] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert "train" in ds
assert isinstance(ds['train'] , _lowerCAmelCase )
assert next(iter(ds['train'] ) )
| 645
| 1
|
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Optional[int] = MODEL_FOR_MASKED_LM_MAPPING
lowerCamelCase__ : List[str] = TF_MODEL_FOR_MASKED_LM_MAPPING
def _UpperCAmelCase ( self ) -> List[str]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
lowercase__ : Tuple = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(a , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 3_8_0_1_5, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 2_5_5_0_6, 'token_str': ' accuser'},
] , )
lowercase__ : Tuple = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(a , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-05,
'token': 3_8_0_1_5,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-05,
'token': 2_5_5_0_6,
'token_str': ' accuser',
},
] , )
lowercase__ : str = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(a , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 1_3_6_0_6, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3_4_9_9, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2_9_4_1, 'token_str': ' Te'},
] , )
@require_torch
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
lowercase__ : Optional[Any] = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(a , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 3_5_6_7_6, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 1_6_4_1_6, 'token_str': 'ELS'},
] , )
lowercase__ : Tuple = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(a , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-05,
'token': 3_5_6_7_6,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 1_6_4_1_6, 'token_str': 'ELS'},
] , )
lowercase__ : Any = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(a , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3_4_9_9, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-05, 'token': 2_9_4_1, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 1_3_6_0_6, 'token_str': ' Clara'},
] , )
lowercase__ : List[Any] = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(a , decimals=6 ) , [
[
{
'score': 2.2e-05,
'token': 3_5_6_7_6,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-05, 'token': 1_6_4_1_6, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-05,
'token': 3_5_6_7_6,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-05, 'token': 1_6_4_1_6, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def _UpperCAmelCase ( self ) -> str:
lowercase__ : str = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
lowercase__ : List[Any] = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(a , a )
@slow
@require_torch
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(a )
@slow
@require_tf
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Tuple = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(a )
def _UpperCAmelCase ( self , a ) -> Dict:
lowercase__ : Dict = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(a ) , [
{'sequence': 'My name is John', 'score': 0.008, 'token': 6_1_0, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.007, 'token': 1_5_7_3, 'token_str': ' Chris'},
] , )
lowercase__ : Any = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(a ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.251,
'token': 2_2_0_1,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.214,
'token': 1_2_7_9_0,
'token_str': ' Lyon',
},
] , )
lowercase__ : Tuple = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(a ) , [
{'sequence': 'My name is Patrick', 'score': 0.005, 'token': 3_4_9_9, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.000, 'token': 1_3_6_0_6, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.000, 'token': 2_9_4_1, 'token_str': ' Te'},
] , )
@require_torch
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
lowercase__ : int = None
lowercase__ : List[str] = None
self.run_pipeline_test(a , [] )
@require_tf
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : Optional[int] = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
lowercase__ : Union[str, Any] = None
lowercase__ : Any = None
self.run_pipeline_test(a , [] )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
lowercase__ : Optional[Any] = FillMaskPipeline(model=a , tokenizer=a )
lowercase__ : Dict = [
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def _UpperCAmelCase ( self , a , a ) -> Any:
lowercase__ : List[Any] = fill_masker.tokenizer
lowercase__ : Optional[int] = fill_masker.model
lowercase__ : Tuple = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
a , [
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
] , )
lowercase__ : List[Any] = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
a , [
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
] , )
lowercase__ : Optional[int] = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
a , [
[
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
],
[
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
],
] , )
with self.assertRaises(a ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(a ):
fill_masker('This is' )
self.run_test_top_k(a , a )
self.run_test_targets(a , a )
self.run_test_top_k_targets(a , a )
self.fill_mask_with_duplicate_targets_and_top_k(a , a )
self.fill_mask_with_multiple_masks(a , a )
def _UpperCAmelCase ( self , a , a ) -> Tuple:
lowercase__ : Optional[int] = tokenizer.get_vocab()
lowercase__ : Dict = sorted(vocab.keys() )[:2]
# Pipeline argument
lowercase__ : Dict = FillMaskPipeline(model=a , tokenizer=a , targets=a )
lowercase__ : Union[str, Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
a , [
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
] , )
lowercase__ : Any = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , a )
lowercase__ : Optional[Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(a ) )
# Call argument
lowercase__ : str = FillMaskPipeline(model=a , tokenizer=a )
lowercase__ : str = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=a )
self.assertEqual(
a , [
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
] , )
lowercase__ : Dict = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , a )
lowercase__ : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(a ) )
# Score equivalence
lowercase__ : List[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=a )
lowercase__ : Optional[int] = [top_mask['token_str'] for top_mask in outputs]
lowercase__ : int = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a ) == set(a ):
lowercase__ : Optional[int] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=a )
lowercase__ : Dict = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a ) , nested_simplify(a ) )
# Raises with invalid
with self.assertRaises(a ):
lowercase__ : Optional[int] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a ):
lowercase__ : Dict = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] )
with self.assertRaises(a ):
lowercase__ : Optional[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' )
def _UpperCAmelCase ( self , a , a ) -> int:
lowercase__ : List[str] = FillMaskPipeline(model=a , tokenizer=a , top_k=2 )
lowercase__ : Optional[int] = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
a , [
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
] , )
lowercase__ : List[Any] = FillMaskPipeline(model=a , tokenizer=a )
lowercase__ : List[str] = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
a , [
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
] , )
self.assertEqual(nested_simplify(a ) , nested_simplify(a ) )
def _UpperCAmelCase ( self , a , a ) -> List[str]:
lowercase__ : Union[str, Any] = tokenizer.get_vocab()
lowercase__ : Dict = FillMaskPipeline(model=a , tokenizer=a )
# top_k=2, ntargets=3
lowercase__ : Union[str, Any] = sorted(vocab.keys() )[:3]
lowercase__ : int = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=a )
# If we use the most probably targets, and filter differently, we should still
# have the same results
lowercase__ : Optional[int] = [el['token_str'] for el in sorted(a , key=lambda a : x["score"] , reverse=a )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a ).issubset(a ):
lowercase__ : Optional[int] = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=a )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a ) , nested_simplify(a ) )
def _UpperCAmelCase ( self , a , a ) -> List[str]:
lowercase__ : Dict = FillMaskPipeline(model=a , tokenizer=a )
lowercase__ : int = tokenizer.get_vocab()
# String duplicates + id duplicates
lowercase__ : Optional[Any] = sorted(vocab.keys() )[:3]
lowercase__ : Tuple = [targets[0], targets[1], targets[0], targets[2], targets[1]]
lowercase__ : Dict = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=a , top_k=1_0 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(a ) , 3 )
def _UpperCAmelCase ( self , a , a ) -> List[str]:
lowercase__ : List[Any] = FillMaskPipeline(model=a , tokenizer=a )
lowercase__ : Tuple = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
a , [
[
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
],
[
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
],
[
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
{'sequence': ANY(a ), 'score': ANY(a ), 'token': ANY(a ), 'token_str': ANY(a )},
],
] , )
| 645
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a_ ( _lowerCAmelCase : dict ):
'''simple docstring'''
return (data["data"], data["target"])
def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ):
'''simple docstring'''
lowercase__ : Any = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(_lowerCAmelCase , _lowerCAmelCase )
# Predict target for test data
lowercase__ : str = xgb.predict(_lowerCAmelCase )
lowercase__ : Union[str, Any] = predictions.reshape(len(_lowerCAmelCase ) , 1 )
return predictions
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[Any] = fetch_california_housing()
lowercase__ , lowercase__ : str = data_handling(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = train_test_split(
_lowerCAmelCase , _lowerCAmelCase , test_size=0.2_5 , random_state=1 )
lowercase__ : Any = xgboost(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
print(f"""Mean Square Error : {mean_squared_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 645
| 1
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : int = 50 ):
'''simple docstring'''
lowercase__ : Optional[Any] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 645
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]:
lowercase__ : str = parent
lowercase__ : int = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Dict = patch_size
lowercase__ : Tuple = tubelet_size
lowercase__ : Optional[int] = num_frames
lowercase__ : Optional[int] = is_training
lowercase__ : int = use_labels
lowercase__ : Optional[int] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = mask_ratio
lowercase__ : Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase__ : Optional[Any] = (image_size // patch_size) ** 2
lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase__ : str = int(mask_ratio * self.seq_length )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Dict = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Tuple:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : Dict = VideoMAEModel(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = VideoMAEForPreTraining(a )
model.to(a )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Any = torch.ones((self.num_masks,) )
lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool()
lowercase__ : str = model(a , a )
# model only returns predictions for masked patches
lowercase__ : str = mask.sum().item()
lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCamelCase__ : Optional[int] = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : str = False
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = VideoMAEModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]:
lowercase__ : Union[str, Any] = copy.deepcopy(a )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) )
lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowercase__ : Union[str, Any] = bool_masked_pos.to(a )
if return_labels:
if model_class in [
*get_values(a ),
]:
lowercase__ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Dict:
pass
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
if not self.has_attentions:
pass
else:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = True
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase__ : Optional[Any] = True
lowercase__ : int = False
lowercase__ : Any = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Dict = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : List[str] = len(a )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : List[str] = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
lowercase__ : int = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> Optional[int]:
def check_hidden_states_output(a , a , a ):
lowercase__ : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a ) , a )
lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Tuple = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(a , a , a )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
def a_ ( ):
'''simple docstring'''
lowercase__ : int = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
lowercase__ : str = np.load(_lowerCAmelCase )
return list(_lowerCAmelCase )
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
a )
lowercase__ : str = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**a )
# verify the logits
lowercase__ : str = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a )
# add boolean mask, indicating which patches to mask
lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
lowercase__ : str = torch.load(a )
# forward pass
with torch.no_grad():
lowercase__ : List[Any] = model(**a )
# verify the logits
lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowercase__ : List[str] = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a )
self.assertEqual(outputs.logits.shape , a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to(
a )
with torch.no_grad():
lowercase__ : Any = model(**a )
lowercase__ : List[Any] = torch.tensor(torch.tensor([0.6_469] ) , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCamelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
for attribute in key.split('.' ):
lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
lowercase__ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowercase__ : Dict = value
elif weight_type == "weight_v":
lowercase__ : List[str] = value
elif weight_type == "bias":
lowercase__ : Optional[Any] = value
else:
lowercase__ : List[str] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = []
lowercase__ : List[str] = fairseq_model.state_dict()
lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ : int = True
if "*" in mapped_key:
lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' )[-2]
lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
lowercase__ : List[Any] = 'weight_g'
elif "weight_v" in name:
lowercase__ : int = 'weight_v'
elif "bias" in name:
lowercase__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = 'weight'
else:
lowercase__ : int = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = full_name.split('conv_layers.' )[-1]
lowercase__ : int = name.split('.' )
lowercase__ : int = int(items[0] )
lowercase__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase )
else:
lowercase__ : Any = UniSpeechSatConfig()
lowercase__ : Union[str, Any] = ''
if is_finetuned:
lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase )
else:
lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCamelCase : str = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 645
| 1
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase : List[Any] = logging.get_logger()
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : LevitConfig , _lowerCAmelCase : Path , _lowerCAmelCase : bool = True ):
'''simple docstring'''
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
lowercase__ : Tuple = timm.create_model('levit_128s' , pretrained=_lowerCAmelCase )
else:
lowercase__ : List[Any] = timm.create_model('levit_128' , pretrained=_lowerCAmelCase )
if hidden_sizes == 192:
lowercase__ : List[Any] = timm.create_model('levit_192' , pretrained=_lowerCAmelCase )
if hidden_sizes == 256:
lowercase__ : Dict = timm.create_model('levit_256' , pretrained=_lowerCAmelCase )
if hidden_sizes == 384:
lowercase__ : Any = timm.create_model('levit_384' , pretrained=_lowerCAmelCase )
from_model.eval()
lowercase__ : Dict = LevitForImageClassificationWithTeacher(_lowerCAmelCase ).eval()
lowercase__ : str = OrderedDict()
lowercase__ : Any = from_model.state_dict()
lowercase__ : Tuple = list(from_model.state_dict().keys() )
lowercase__ : List[str] = list(our_model.state_dict().keys() )
print(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for i in range(len(_lowerCAmelCase ) ):
lowercase__ : Tuple = weights[og_keys[i]]
our_model.load_state_dict(_lowerCAmelCase )
lowercase__ : List[str] = torch.randn((2, 3, 224, 224) )
lowercase__ : Optional[Any] = from_model(_lowerCAmelCase )
lowercase__ : Union[str, Any] = our_model(_lowerCAmelCase ).logits
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase ), "The model logits don't match the original one."
lowercase__ : Dict = name
print(_lowerCAmelCase )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowercase__ : int = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def a_ ( _lowerCAmelCase : Path , _lowerCAmelCase : str = None , _lowerCAmelCase : bool = True ):
'''simple docstring'''
lowercase__ : int = 'imagenet-1k-id2label.json'
lowercase__ : Union[str, Any] = 1000
lowercase__ : Any = (1, num_labels)
lowercase__ : List[Any] = 'huggingface/label-files'
lowercase__ : Optional[int] = num_labels
lowercase__ : Tuple = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
lowercase__ : Union[str, Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
lowercase__ : int = idalabel
lowercase__ : List[str] = {v: k for k, v in idalabel.items()}
lowercase__ : Optional[Any] = partial(_lowerCAmelCase , num_labels=_lowerCAmelCase , idalabel=_lowerCAmelCase , labelaid=_lowerCAmelCase )
lowercase__ : Optional[int] = {
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
lowercase__ : List[str] = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , _lowerCAmelCase , names_to_config[model_name] , _lowerCAmelCase , _lowerCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, expected_shape
if __name__ == "__main__":
_UpperCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
_UpperCamelCase : Optional[int] = parser.parse_args()
_UpperCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 645
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int:
lowercase__ : int = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Dict = image_size
lowercase__ : str = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = embed_dim
lowercase__ : Any = depths
lowercase__ : Dict = num_heads
lowercase__ : List[str] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Tuple = qkv_bias
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = drop_path_rate
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[Any] = use_absolute_embeddings
lowercase__ : Optional[Any] = patch_norm
lowercase__ : Any = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : List[str] = is_training
lowercase__ : int = scope
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : List[str] = encoder_stride
lowercase__ : Optional[Any] = out_features
lowercase__ : Dict = out_indices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Tuple = MaskFormerSwinModel(config=a )
model.to(a )
model.eval()
lowercase__ : str = model(a )
lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(a ):
lowercase__ : Dict = ['stem']
lowercase__ : List[str] = MaskFormerSwinBackbone(config=a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase__ : str = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = MaskFormerSwinModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> str:
return
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a )
@unittest.skip('Swin does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
lowercase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple:
lowercase__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[Any] = outputs.hidden_states
lowercase__ : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swin has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = 3
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : int = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(a ):
lowercase__ : Union[str, Any] = 0
return t
def check_equivalence(a , a , a , a={} ):
with torch.no_grad():
lowercase__ : Optional[Any] = model(**a , return_dict=a , **a )
lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple()
def recursive_check(a , a ):
if isinstance(a , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a , a ):
recursive_check(a , a )
elif isinstance(a , a ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(a , a )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has"""
f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}."""
) , )
recursive_check(a , a )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
model.to(a )
model.eval()
lowercase__ : Tuple = self._prepare_for_class(a , a )
lowercase__ : Optional[Any] = self._prepare_for_class(a , a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a )
lowercase__ : int = self._prepare_for_class(a , a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _a):
lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[int] = MaskFormerSwinModelTester(self )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
lowercase__ : Optional[Any] = backbone_class(a )
backbone.to(a )
backbone.eval()
lowercase__ : Union[str, Any] = backbone(**a )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , a )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowercase__ : List[str] = backbone(**a , output_hidden_states=a )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowercase__ , lowercase__ , lowercase__ : int = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowercase__ : List[Any] = backbone(**a , output_attentions=a )
self.assertIsNotNone(outputs.attentions )
| 645
| 1
|
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : dict , _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ , lowercase__ : int = set(_lowerCAmelCase ), [start]
while stack:
lowercase__ : Dict = stack.pop()
explored.add(_lowerCAmelCase )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(_lowerCAmelCase )
return explored
_UpperCamelCase : Tuple = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 645
|
"""simple docstring"""
import math
def a_ ( _lowerCAmelCase : int = 100 ):
'''simple docstring'''
lowercase__ : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) )
lowercase__ : str = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
| 645
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self , a ) -> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
lowercase__ : str = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = 'sshleifer/tiny-gpt2'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : List[Any] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : Tuple = TensorFlowBenchmark(a , [config] )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : List[str] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : List[str] = TensorFlowBenchmark(a , [config] )
lowercase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[int] = AutoConfig.from_pretrained(a )
lowercase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a , [config] )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random'
lowercase__ : Any = AutoConfig.from_pretrained(a )
lowercase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : int = TensorFlowBenchmark(a , configs=[config] )
lowercase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , )
lowercase__ : Any = TensorFlowBenchmark(a )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , )
lowercase__ : Union[str, Any] = TensorFlowBenchmark(a )
benchmark.run()
self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(a ):
self.assertTrue(hasattr(a , 'sequential' ) )
self.assertTrue(hasattr(a , 'cumulative' ) )
self.assertTrue(hasattr(a , 'current' ) )
self.assertTrue(hasattr(a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , )
lowercase__ : Optional[int] = TensorFlowBenchmark(a )
lowercase__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
| 645
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : List[Any] = controlnet_params
lowercase__ : int = 'bird'
lowercase__ : List[Any] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
lowercase__ : List[Any] = jax.random.PRNGKey(0 )
lowercase__ : Tuple = jax.random.split(a , jax.device_count() )
lowercase__ : str = replicate(a )
lowercase__ : List[str] = shard(a )
lowercase__ : Dict = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : Optional[Any] = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : Optional[Any] = controlnet_params
lowercase__ : List[Any] = 'Chef in the kitchen'
lowercase__ : List[str] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
lowercase__ : List[str] = jax.random.PRNGKey(0 )
lowercase__ : str = jax.random.split(a , jax.device_count() )
lowercase__ : Optional[Any] = replicate(a )
lowercase__ : Optional[Any] = shard(a )
lowercase__ : List[Any] = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : str = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 645
| 1
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : Optional[int] = 16
_UpperCamelCase : List[str] = 32
def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" ):
'''simple docstring'''
lowercase__ : Dict = AutoTokenizer.from_pretrained(_lowerCAmelCase )
lowercase__ : List[str] = load_dataset('glue' , 'mrpc' )
def tokenize_function(_lowerCAmelCase : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : Optional[Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Optional[Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Any = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCAmelCase : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowercase__ : List[Any] = DataLoader(
tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : str = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
def a_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] ):
'''simple docstring'''
lowercase__ : Optional[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : Optional[Any] = config['lr']
lowercase__ : str = int(config['num_epochs'] )
lowercase__ : int = int(config['seed'] )
lowercase__ : List[Any] = int(config['batch_size'] )
lowercase__ : str = args.model_name_or_path
set_seed(_lowerCAmelCase )
lowercase__ , lowercase__ : Dict = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[str] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase )
# Instantiate optimizer
lowercase__ : Dict = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ : Optional[int] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ : Dict = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowercase__ : Optional[Any] = 1
lowercase__ : Optional[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ : List[str] = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , )
else:
lowercase__ : List[Any] = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase__ : Union[str, Any] = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ : Dict = 0
# Now we train the model
lowercase__ : List[Any] = evaluate.load('glue' , 'mrpc' )
lowercase__ : Dict = 0
lowercase__ : Union[str, Any] = {}
for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
lowercase__ : Dict = model(**_lowerCAmelCase )
lowercase__ : Optional[Any] = outputs.loss
lowercase__ : Dict = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase__ : Tuple = 0
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : Dict = model(**_lowerCAmelCase )
lowercase__ : Optional[int] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowercase__ , lowercase__ : Dict = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowerCAmelCase ) - 1:
lowercase__ : Optional[int] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase__ : Union[str, Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowerCAmelCase , references=_lowerCAmelCase , )
lowercase__ : Optional[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , _lowerCAmelCase )
lowercase__ : List[str] = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
lowercase__ : Optional[Any] = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : List[Any] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , )
parser.add_argument(
'--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=_lowerCAmelCase , default=3 , help='Number of train epochs.' , )
lowercase__ : Tuple = parser.parse_args()
lowercase__ : int = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 645
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 645
| 1
|
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_UpperCamelCase : Tuple = get_logger()
_UpperCamelCase : Optional[dict] = None
class UpperCAmelCase_ ( TensorFormatter[Mapping, "jax.Array", Mapping]):
def __init__( self , a=None , a=None , **a ) -> List[str]:
super().__init__(features=a )
import jax
from jaxlib.xla_client import Device
if isinstance(a , a ):
raise ValueError(
f"""Expected {device} to be a `str` not {type(a )}, as `jaxlib.xla_extension.Device` """
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
lowercase__ : Union[str, Any] = device if isinstance(a , a ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowercase__ : List[str] = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f"""Device with string identifier {self.device} not listed among the available """
f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
f"""device: {str(jax.devices()[0] )}.""" )
lowercase__ : List[Any] = str(jax.devices()[0] )
lowercase__ : str = jnp_array_kwargs
@staticmethod
def _UpperCAmelCase ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(a ): device for device in jax.devices()}
def _UpperCAmelCase ( self , a ) -> List[Any]:
import jax
import jax.numpy as jnp
if isinstance(a , a ) and column:
if all(
isinstance(a , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(a , axis=0 )
return column
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
import jax
import jax.numpy as jnp
if isinstance(a , (str, bytes, type(a )) ):
return value
elif isinstance(a , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
lowercase__ : Any = {}
if isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
lowercase__ : Optional[int] = {'dtype': jnp.intaa}
else:
lowercase__ : List[str] = {'dtype': jnp.intaa}
elif isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
lowercase__ : Union[str, Any] = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(a , PIL.Image.Image ):
lowercase__ : Optional[Any] = np.asarray(a )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowercase__ : List[str] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(a , **{**default_dtype, **self.jnp_array_kwargs} )
def _UpperCAmelCase ( self , a ) -> Any:
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(a , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(a , '__array__' ) and not isinstance(a , jax.Array ):
lowercase__ : Tuple = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(a , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(a ) for substruct in data_struct] )
elif isinstance(a , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(a ) for substruct in data_struct] )
return self._tensorize(a )
def _UpperCAmelCase ( self , a ) -> Any:
return map_nested(self._recursive_tensorize , a , map_list=a )
def _UpperCAmelCase ( self , a ) -> Mapping:
lowercase__ : Optional[int] = self.numpy_arrow_extractor().extract_row(a )
lowercase__ : List[Any] = self.python_features_decoder.decode_row(a )
return self.recursive_tensorize(a )
def _UpperCAmelCase ( self , a ) -> "jax.Array":
lowercase__ : Union[str, Any] = self.numpy_arrow_extractor().extract_column(a )
lowercase__ : List[str] = self.python_features_decoder.decode_column(a , pa_table.column_names[0] )
lowercase__ : Union[str, Any] = self.recursive_tensorize(a )
lowercase__ : str = self._consolidate(a )
return column
def _UpperCAmelCase ( self , a ) -> Mapping:
lowercase__ : List[Any] = self.numpy_arrow_extractor().extract_batch(a )
lowercase__ : str = self.python_features_decoder.decode_batch(a )
lowercase__ : List[str] = self.recursive_tensorize(a )
for column_name in batch:
lowercase__ : List[Any] = self._consolidate(batch[column_name] )
return batch
| 645
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
lowercase__ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(a )
from datasets import load_dataset
lowercase__ : str = load_dataset('nielsr/rvlcdip-demo' )
lowercase__ : Tuple = dataset['train'][0]['image'].convert('RGB' )
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : List[str] = model(**a )
lowercase__ : List[Any] = outputs.logits
lowercase__ : Union[str, Any] = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , a )
lowercase__ : Tuple = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=_a):
lowerCamelCase__ : str = ["onnx"]
def __init__( self , *a , **a ) -> List[str]:
requires_backends(self , ['onnx'] )
@classmethod
def _UpperCAmelCase ( cls , *a , **a ) -> Tuple:
requires_backends(cls , ['onnx'] )
@classmethod
def _UpperCAmelCase ( cls , *a , **a ) -> str:
requires_backends(cls , ['onnx'] )
| 645
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase_ :
@staticmethod
def _UpperCAmelCase ( *a , **a ) -> int:
pass
def a_ ( _lowerCAmelCase : Image ):
'''simple docstring'''
lowercase__ : List[str] = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a )
import datasets
lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowercase__ : List[Any] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = 'Intel/dpt-large'
lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a )
lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowercase__ : Optional[Any] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 645
| 1
|
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCAmelCase_ ( _a):
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(a , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(a , 'num_attention_heads' ) )
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=6_4 , a=3 , a=3 , a=2 , a=1 , a=1_6 , a=[1_2_8, 2_5_6, 3_8_4] , a=[4, 6, 8] , a=[2, 3, 4] , a=[1_6, 1_6, 1_6] , a=0 , a=[2, 2, 2] , a=[2, 2, 2] , a=0.02 , a=True , a=True , a=2 , ) -> List[str]:
lowercase__ : List[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : Optional[int] = image_size
lowercase__ : int = num_channels
lowercase__ : Optional[Any] = kernel_size
lowercase__ : List[str] = stride
lowercase__ : Tuple = padding
lowercase__ : int = hidden_sizes
lowercase__ : int = num_attention_heads
lowercase__ : List[str] = depths
lowercase__ : Optional[int] = key_dim
lowercase__ : List[str] = drop_path_rate
lowercase__ : int = patch_size
lowercase__ : Union[str, Any] = attention_ratio
lowercase__ : Any = mlp_ratio
lowercase__ : Dict = initializer_range
lowercase__ : str = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
lowercase__ : int = is_training
lowercase__ : Union[str, Any] = use_labels
lowercase__ : Dict = num_labels
lowercase__ : Tuple = initializer_range
def _UpperCAmelCase ( self ) -> str:
lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : str = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Tuple:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _UpperCAmelCase ( self , a , a , a ) -> Any:
lowercase__ : List[str] = LevitModel(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(a )
lowercase__ : Tuple = (self.image_size, self.image_size)
lowercase__ , lowercase__ : Optional[int] = image_size[0], image_size[1]
for _ in range(4 ):
lowercase__ : List[Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
lowercase__ : Tuple = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _UpperCAmelCase ( self , a , a , a ) -> List[Any]:
lowercase__ : Dict = self.num_labels
lowercase__ : List[Any] = LevitForImageClassification(a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs
lowercase__ : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Dict = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowerCamelCase__ : str = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : List[str] = False
def _UpperCAmelCase ( self ) -> str:
lowercase__ : List[str] = LevitModelTester(self )
lowercase__ : List[str] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> Tuple:
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='Levit does not output attentions' )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> int:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Tuple = model_class(a )
lowercase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[int] = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> str:
def check_hidden_states_output(a , a , a ):
lowercase__ : Tuple = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[Any] = outputs.hidden_states
lowercase__ : Union[str, Any] = len(self.model_tester.depths ) + 1
self.assertEqual(len(a ) , a )
lowercase__ : List[str] = (self.model_tester.image_size, self.model_tester.image_size)
lowercase__ , lowercase__ : Any = image_size[0], image_size[1]
for _ in range(4 ):
lowercase__ : Union[str, Any] = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
lowercase__ : Any = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(a , a , a )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self , a , a , a=False ) -> Tuple:
lowercase__ : str = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
def _UpperCAmelCase ( self ) -> int:
if not self.model_tester.is_training:
return
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(a )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
lowercase__ : Union[str, Any] = model_class(a )
model.to(a )
model.train()
lowercase__ : Optional[Any] = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : List[Any] = model(**a ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase__ : Tuple = False
lowercase__ : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(a ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
lowercase__ : int = model_class(a )
model.gradient_checkpointing_enable()
model.to(a )
model.train()
lowercase__ : int = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : Optional[int] = model(**a ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(a ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}""" ):
lowercase__ : Any = problem_type['title']
lowercase__ : List[str] = problem_type['num_labels']
lowercase__ : Any = model_class(a )
model.to(a )
model.train()
lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a )
if problem_type["num_labels"] > 1:
lowercase__ : List[Any] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
lowercase__ : Any = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=a ) as warning_list:
lowercase__ : str = model(**a ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = LevitModel.from_pretrained(a )
self.assertIsNotNone(a )
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Any:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
a )
lowercase__ : List[str] = self.default_image_processor
lowercase__ : str = prepare_img()
lowercase__ : List[str] = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : List[Any] = model(**a )
# verify the logits
lowercase__ : Union[str, Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : Dict = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
| 645
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase_ ( _a):
def __init__( self ) -> Any:
lowercase__ : Tuple = []
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_init_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]:
self.events.append('on_train_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_train_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_epoch_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[Any]:
self.events.append('on_epoch_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_step_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> str:
self.events.append('on_step_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_evaluate' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple:
self.events.append('on_predict' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Union[str, Any]:
self.events.append('on_save' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_log' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_prediction_step' )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> str:
lowercase__ : str = tempfile.mkdtemp()
def _UpperCAmelCase ( self ) -> Dict:
shutil.rmtree(self.output_dir )
def _UpperCAmelCase ( self , a=0 , a=0 , a=6_4 , a=6_4 , a=None , a=False , **a ) -> int:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
lowercase__ : str = RegressionDataset(length=a )
lowercase__ : Any = RegressionDataset(length=a )
lowercase__ : Optional[Any] = RegressionModelConfig(a=a , b=a )
lowercase__ : Union[str, Any] = RegressionPreTrainedModel(a )
lowercase__ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=a , report_to=[] , **a )
return Trainer(
a , a , train_dataset=a , eval_dataset=a , callbacks=a , )
def _UpperCAmelCase ( self , a , a ) -> Union[str, Any]:
self.assertEqual(len(a ) , len(a ) )
# Order doesn't matter
lowercase__ : Optional[int] = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
lowercase__ : Tuple = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
for cba, cba in zip(a , a ):
if isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(a , a )
elif isinstance(a , a ) and not isinstance(a , a ):
self.assertEqual(a , cba.__class__ )
elif not isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(cba.__class__ , a )
else:
self.assertEqual(a , a )
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
lowercase__ : Dict = ['on_init_end', 'on_train_begin']
lowercase__ : List[Any] = 0
lowercase__ : Optional[int] = len(trainer.get_eval_dataloader() )
lowercase__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(a ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : int = self.get_trainer()
lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# Callbacks passed at init are added to the default callbacks
lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a )
lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ : List[str] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Optional[Any] = self.get_trainer()
lowercase__ : List[Any] = trainer.pop_callback(a )
self.assertEqual(cb.__class__ , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# We can also add, pop, or remove by instance
lowercase__ : int = self.get_trainer()
lowercase__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Tuple = self.get_trainer()
lowercase__ : Dict = trainer.callback_handler.callbacks[0]
lowercase__ : Union[str, Any] = trainer.pop_callback(a )
self.assertEqual(a , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Tuple:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=a )
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# Independent log/save/eval
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
lowercase__ : str = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(a ) in warn_mock.call_args[0][0]
| 645
| 1
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
return " ".join(
''.join(word[::-1] ) if len(_lowerCAmelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 645
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCamelCase : str = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 645
| 1
|
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
_UpperCamelCase : Dict = getLogger(__name__)
_UpperCamelCase : List[Any] = "cuda" if torch.cuda.is_available() else "cpu"
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : int = 8 , _lowerCAmelCase : str = DEFAULT_DEVICE , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : Union[str, Any]="summarization" , _lowerCAmelCase : Optional[Any]=None , **_lowerCAmelCase : Optional[Any] , ):
'''simple docstring'''
lowercase__ : List[str] = Path(_lowerCAmelCase ).open('w' , encoding='utf-8' )
lowercase__ : Any = str(_lowerCAmelCase )
lowercase__ : int = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase ).to(_lowerCAmelCase )
if fpaa:
lowercase__ : Union[str, Any] = model.half()
lowercase__ : Optional[int] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
lowercase__ : str = time.time()
# update config with task specific params
use_task_specific_params(_lowerCAmelCase , _lowerCAmelCase )
if prefix is None:
lowercase__ : Tuple = prefix or getattr(model.config , 'prefix' , '' ) or ''
for examples_chunk in tqdm(list(chunks(_lowerCAmelCase , _lowerCAmelCase ) ) ):
lowercase__ : Tuple = [prefix + text for text in examples_chunk]
lowercase__ : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors='pt' , truncation=_lowerCAmelCase , padding='longest' ).to(_lowerCAmelCase )
lowercase__ : int = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_lowerCAmelCase , )
lowercase__ : str = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
for hypothesis in dec:
fout.write(hypothesis + '\n' )
fout.flush()
fout.close()
lowercase__ : str = int(time.time() - start_time ) # seconds
lowercase__ : Union[str, Any] = len(_lowerCAmelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def a_ ( ):
'''simple docstring'''
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )
def a_ ( _lowerCAmelCase : Tuple=True ):
'''simple docstring'''
lowercase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('model_name' , type=_lowerCAmelCase , help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('input_path' , type=_lowerCAmelCase , help='like cnn_dm/test.source' )
parser.add_argument('save_path' , type=_lowerCAmelCase , help='where to save summaries' )
parser.add_argument('--reference_path' , type=_lowerCAmelCase , required=_lowerCAmelCase , help='like cnn_dm/test.target' )
parser.add_argument('--score_path' , type=_lowerCAmelCase , required=_lowerCAmelCase , default='metrics.json' , help='where to save metrics' )
parser.add_argument('--device' , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help='cuda, cuda:1, cpu etc.' )
parser.add_argument(
'--prefix' , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help='will be added to the begininng of src examples' )
parser.add_argument('--task' , type=_lowerCAmelCase , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=_lowerCAmelCase , default=8 , required=_lowerCAmelCase , help='batch size' )
parser.add_argument(
'--n_obs' , type=_lowerCAmelCase , default=-1 , required=_lowerCAmelCase , help='How many observations. Defaults to all.' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--dump-args' , action='store_true' , help='print the custom hparams with the results' )
parser.add_argument(
'--info' , nargs='?' , type=_lowerCAmelCase , const=datetime_now() , help=(
'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
' lang=en-ru. If no value is passed, the current datetime string will be used.'
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
lowercase__ , lowercase__ : Any = parser.parse_known_args()
lowercase__ : List[str] = parse_numeric_n_bool_cl_kwargs(_lowerCAmelCase )
if parsed_args and verbose:
print(f"""parsed the following generate kwargs: {parsed_args}""" )
lowercase__ : Union[str, Any] = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
lowercase__ : Tuple = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_lowerCAmelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('Can\'t mix --fp16 and --device cpu' )
lowercase__ : List[Any] = generate_summaries_or_translations(
_lowerCAmelCase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_lowerCAmelCase , )
if args.reference_path is None:
return {}
# Compute scores
lowercase__ : Any = calculate_bleu if 'translation' in args.task else calculate_rouge
lowercase__ : Any = [x.rstrip() for x in open(args.save_path ).readlines()]
lowercase__ : str = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_lowerCAmelCase )]
lowercase__ : dict = score_fn(_lowerCAmelCase , _lowerCAmelCase )
scores.update(_lowerCAmelCase )
if args.dump_args:
scores.update(_lowerCAmelCase )
if args.info:
lowercase__ : List[str] = args.info
if verbose:
print(_lowerCAmelCase )
if args.score_path is not None:
json.dump(_lowerCAmelCase , open(args.score_path , 'w' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self , a ) -> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
lowercase__ : str = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = 'sshleifer/tiny-gpt2'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : List[Any] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : Tuple = TensorFlowBenchmark(a , [config] )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : List[str] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : List[str] = TensorFlowBenchmark(a , [config] )
lowercase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[int] = AutoConfig.from_pretrained(a )
lowercase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a , [config] )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random'
lowercase__ : Any = AutoConfig.from_pretrained(a )
lowercase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : int = TensorFlowBenchmark(a , configs=[config] )
lowercase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , )
lowercase__ : Any = TensorFlowBenchmark(a )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , )
lowercase__ : Union[str, Any] = TensorFlowBenchmark(a )
benchmark.run()
self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(a ):
self.assertTrue(hasattr(a , 'sequential' ) )
self.assertTrue(hasattr(a , 'cumulative' ) )
self.assertTrue(hasattr(a , 'current' ) )
self.assertTrue(hasattr(a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , )
lowercase__ : Optional[int] = TensorFlowBenchmark(a )
lowercase__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
| 645
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCamelCase : int = logging.get_logger(__name__)
_UpperCamelCase : List[str] = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Tuple = "xlm-roberta"
def __init__( self , a=3_0_5_2_2 , a=7_6_8 , a=1_2 , a=1_2 , a=3_0_7_2 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=2 , a=0.02 , a=1e-12 , a=1 , a=0 , a=2 , a="absolute" , a=True , a=None , **a , ) -> str:
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a )
lowercase__ : Dict = vocab_size
lowercase__ : Any = hidden_size
lowercase__ : Optional[int] = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : List[Any] = hidden_act
lowercase__ : List[str] = intermediate_size
lowercase__ : Tuple = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : Optional[int] = type_vocab_size
lowercase__ : List[Any] = initializer_range
lowercase__ : Dict = layer_norm_eps
lowercase__ : Dict = position_embedding_type
lowercase__ : Optional[Any] = use_cache
lowercase__ : int = classifier_dropout
class UpperCAmelCase_ ( _a):
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowercase__ : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowercase__ : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : List[str] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : str = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = DistilBertModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(config=a )
lowercase__ : int = self._prepare_for_class(a , a )
lowercase__ : Tuple = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) )
lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ : Optional[Any] = model(a , attention_mask=a )[0]
lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a )
lowercase__ : List[Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
def __init__( self , *a , **a ) -> None:
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' , a , )
super().__init__(*a , **a )
| 645
|
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
| 1
|
"""simple docstring"""
from __future__ import annotations
import bisect
def a_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int = 0 , _lowerCAmelCase : int = -1 ):
'''simple docstring'''
if hi < 0:
lowercase__ : Union[str, Any] = len(_lowerCAmelCase )
while lo < hi:
lowercase__ : Dict = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
lowercase__ : str = mid + 1
else:
lowercase__ : List[str] = mid
return lo
def a_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int = 0 , _lowerCAmelCase : int = -1 ):
'''simple docstring'''
if hi < 0:
lowercase__ : str = len(_lowerCAmelCase )
while lo < hi:
lowercase__ : Union[str, Any] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
lowercase__ : List[str] = mid + 1
else:
lowercase__ : Optional[int] = mid
return lo
def a_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int = 0 , _lowerCAmelCase : int = -1 ):
'''simple docstring'''
sorted_collection.insert(bisect_left(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
def a_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int = 0 , _lowerCAmelCase : int = -1 ):
'''simple docstring'''
sorted_collection.insert(bisect_right(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
def a_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : List[Any] = 0
lowercase__ : Optional[Any] = len(_lowerCAmelCase ) - 1
while left <= right:
lowercase__ : Optional[int] = left + (right - left) // 2
lowercase__ : str = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
lowercase__ : Union[str, Any] = midpoint - 1
else:
lowercase__ : Dict = midpoint + 1
return None
def a_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Dict = bisect.bisect_left(_lowerCAmelCase , _lowerCAmelCase )
if index != len(_lowerCAmelCase ) and sorted_collection[index] == item:
return index
return None
def a_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
'''simple docstring'''
if right < left:
return None
lowercase__ : List[Any] = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , midpoint - 1 )
else:
return binary_search_by_recursion(_lowerCAmelCase , _lowerCAmelCase , midpoint + 1 , _lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : List[str] = input("Enter numbers separated by comma:\n").strip()
_UpperCamelCase : List[str] = sorted(int(item) for item in user_input.split(","))
_UpperCamelCase : str = int(input("Enter a single number to be found in the list:\n"))
_UpperCamelCase : Union[str, Any] = binary_search(collection, target)
if result is None:
print(f'''{target} was not found in {collection}.''')
else:
print(f'''{target} was found at position {result} in {collection}.''')
| 645
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any:
lowercase__ : List[str] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[int] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : str = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Any = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Tuple = n_targets
lowercase__ : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ : int = []
for i in range(self.batch_size ):
lowercase__ : Optional[Any] = {}
lowercase__ : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=a )
lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a )
labels.append(a )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _UpperCAmelCase ( self , a , a , a ) -> int:
lowercase__ : List[str] = YolosModel(config=a )
model.to(a )
model.eval()
lowercase__ : List[Any] = model(a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = YolosForObjectDetection(a )
model.to(a )
model.eval()
lowercase__ : Dict = model(pixel_values=a )
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ : str = model(pixel_values=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__ : List[str] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Union[str, Any] = False
def _UpperCAmelCase ( self , a , a , a=False ) -> Dict:
lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase__ : Optional[Any] = []
for i in range(self.model_tester.batch_size ):
lowercase__ : Dict = {}
lowercase__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=a , dtype=torch.long )
lowercase__ : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=a , dtype=torch.float )
labels.append(a )
lowercase__ : Union[str, Any] = labels
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = YolosModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
lowercase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = True
# in YOLOS, the seq_len is different
lowercase__ : Tuple = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : str = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : Optional[int] = True
lowercase__ : List[Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[str] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : Dict = len(a )
# Check attention is always last and order is fine
lowercase__ : Any = True
lowercase__ : int = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowercase__ : Tuple = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(a , a , a ):
lowercase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(a , a ) )
lowercase__ : int = outputs.hidden_states
lowercase__ : Any = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(a ) , a )
# YOLOS has a different seq_length
lowercase__ : Optional[int] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[Any] = True
check_hidden_states_output(a , a , a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*a )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = YolosModel.from_pretrained(a )
self.assertIsNotNone(a )
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a )
lowercase__ : Tuple = self.default_image_processor
lowercase__ : Optional[int] = prepare_img()
lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : int = model(inputs.pixel_values )
# verify outputs
lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , )
lowercase__ : List[str] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) )
# verify postprocessing
lowercase__ : Optional[Any] = image_processor.post_process_object_detection(
a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a )
lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7]
lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , a )
self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
| 645
| 1
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Optional[int] = "Speech2TextFeatureExtractor"
lowerCamelCase__ : Any = "Speech2TextTokenizer"
def __init__( self , a , a ) -> Union[str, Any]:
super().__init__(a , a )
lowercase__ : List[str] = self.feature_extractor
lowercase__ : str = False
def __call__( self , *a , **a ) -> Optional[int]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*a , **a )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowercase__ : int = kwargs.pop('raw_speech' )
else:
lowercase__ : Optional[Any] = kwargs.pop('audio' , a )
lowercase__ : Optional[int] = kwargs.pop('sampling_rate' , a )
lowercase__ : int = kwargs.pop('text' , a )
if len(a ) > 0:
lowercase__ : Dict = args[0]
lowercase__ : str = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowercase__ : Dict = self.feature_extractor(a , *a , sampling_rate=a , **a )
if text is not None:
lowercase__ : Optional[int] = self.tokenizer(a , **a )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowercase__ : Tuple = encodings['input_ids']
return inputs
def _UpperCAmelCase ( self , *a , **a ) -> str:
return self.tokenizer.batch_decode(*a , **a )
def _UpperCAmelCase ( self , *a , **a ) -> List[str]:
return self.tokenizer.decode(*a , **a )
@contextmanager
def _UpperCAmelCase ( self ) -> int:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
lowercase__ : Optional[int] = True
lowercase__ : str = self.tokenizer
yield
lowercase__ : Union[str, Any] = self.feature_extractor
lowercase__ : List[Any] = False
| 645
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_UpperCamelCase : int = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
def __init__( self , a=False , a=False , a=6.0 , a=None , a=False , a=False , a=None , a="fp4" , a=False , **a , ) -> Tuple:
lowercase__ : str = load_in_abit
lowercase__ : str = load_in_abit
lowercase__ : List[str] = llm_inta_threshold
lowercase__ : Dict = llm_inta_skip_modules
lowercase__ : Tuple = llm_inta_enable_fpaa_cpu_offload
lowercase__ : Any = llm_inta_has_fpaa_weight
lowercase__ : Any = bnb_abit_quant_type
lowercase__ : Dict = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
lowercase__ : Dict = torch.floataa
elif isinstance(a , a ):
lowercase__ : Any = getattr(a , a )
elif isinstance(a , torch.dtype ):
lowercase__ : Any = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def _UpperCAmelCase ( self ) -> str:
if not isinstance(self.llm_inta_threshold , a ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , a ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , a ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , a ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , a ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , a ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def _UpperCAmelCase ( self ) -> Tuple:
return self.load_in_abit or self.load_in_abit
def _UpperCAmelCase ( self ) -> List[str]:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _UpperCAmelCase ( cls , a , a , **a ) -> Optional[Any]:
lowercase__ : List[Any] = cls(**a )
lowercase__ : Union[str, Any] = []
for key, value in kwargs.items():
if hasattr(a , a ):
setattr(a , a , a )
to_remove.append(a )
for key in to_remove:
kwargs.pop(a , a )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _UpperCAmelCase ( self , a ) -> Dict:
with open(a , 'w' , encoding='utf-8' ) as writer:
lowercase__ : Any = self.to_dict()
lowercase__ : str = json.dumps(a , indent=2 , sort_keys=a ) + '\n'
writer.write(a )
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self ) -> Dict:
return f"""{self.__class__.__name__} {self.to_json_string()}"""
def _UpperCAmelCase ( self , a = True ) -> str:
if use_diff is True:
lowercase__ : List[Any] = self.to_diff_dict()
else:
lowercase__ : List[str] = self.to_dict()
return json.dumps(a , indent=2 , sort_keys=a ) + "\n"
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Tuple = self.to_dict()
# get the default config dict
lowercase__ : Optional[Any] = BitsAndBytesConfig().to_dict()
lowercase__ : int = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
lowercase__ : Optional[int] = value
return serializable_config_dict
| 645
| 1
|
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
lowercase__ : List[Any] = int(_lowerCAmelCase )
assert noofclusters < len(_lowerCAmelCase )
# Find out the dimensionality
lowercase__ : Tuple = len(vectors[0] )
# Will help select random centroids from among the available vectors
lowercase__ : int = list(range(len(_lowerCAmelCase ) ) )
shuffle(_lowerCAmelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowercase__ : int = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowercase__ : List[Any] = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowercase__ : Dict = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCAmelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowercase__ : Dict = tf.placeholder('float64' , [dim] )
lowercase__ : List[str] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCAmelCase , _lowerCAmelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
lowercase__ : str = [tf.Variable(0 ) for i in range(len(_lowerCAmelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowercase__ : Tuple = tf.placeholder('int32' )
lowercase__ : List[str] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCAmelCase , _lowerCAmelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
lowercase__ : int = tf.placeholder('float' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowercase__ : int = tf.reduce_mean(_lowerCAmelCase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowercase__ : Tuple = tf.placeholder('float' , [dim] )
lowercase__ : Tuple = tf.placeholder('float' , [dim] )
lowercase__ : Optional[int] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCAmelCase , _lowerCAmelCase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowercase__ : List[str] = tf.placeholder('float' , [noofclusters] )
lowercase__ : Optional[int] = tf.argmin(_lowerCAmelCase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowercase__ : List[str] = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowerCAmelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowercase__ : Optional[int] = 100
for _ in range(_lowerCAmelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCAmelCase ) ):
lowercase__ : Tuple = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
lowercase__ : Optional[int] = [
sess.run(_lowerCAmelCase , feed_dict={va: vect, va: sess.run(_lowerCAmelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowercase__ : Dict = sess.run(
_lowerCAmelCase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCAmelCase ):
# Collect all the vectors assigned to this cluster
lowercase__ : Optional[int] = [
vectors[i]
for i in range(len(_lowerCAmelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowercase__ : Optional[int] = sess.run(
_lowerCAmelCase , feed_dict={mean_input: array(_lowerCAmelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowercase__ : List[str] = sess.run(_lowerCAmelCase )
lowercase__ : Dict = sess.run(_lowerCAmelCase )
return centroids, assignments
| 645
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : int = 16
_UpperCamelCase : Union[str, Any] = 32
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
return int(x / 2**20 )
class UpperCAmelCase_ :
def __enter__( self ) -> Union[str, Any]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase__ : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self , *a ) -> Any:
gc.collect()
torch.cuda.empty_cache()
lowercase__ : Optional[Any] = torch.cuda.memory_allocated()
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
lowercase__ : List[Any] = bamb(self.end - self.begin )
lowercase__ : List[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ):
'''simple docstring'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
lowercase__ : Union[str, Any] = load_dataset(
'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} )
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Union[str, Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCAmelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : Optional[int] = config['lr']
lowercase__ : Optional[Any] = int(config['num_epochs'] )
lowercase__ : Optional[Any] = int(config['seed'] )
lowercase__ : int = int(config['batch_size'] )
lowercase__ : Union[str, Any] = args.model_name_or_path
set_seed(_lowerCAmelCase )
lowercase__ , lowercase__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase )
# Instantiate optimizer
lowercase__ : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowercase__ : List[Any] = 1
lowercase__ : List[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , )
else:
lowercase__ : Tuple = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase__ : Optional[int] = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ : Tuple = 0
# Now we train the model
lowercase__ : Optional[Any] = {}
for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
lowercase__ : List[Any] = model(**_lowerCAmelCase )
lowercase__ : Dict = outputs.loss
lowercase__ : int = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowercase__ : int = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , )
parser.add_argument(
'--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=_lowerCAmelCase , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=_lowerCAmelCase , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=_lowerCAmelCase , default=1 , help='Number of train epochs.' , )
lowercase__ : Any = parser.parse_args()
lowercase__ : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 645
| 1
|
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] ):
'''simple docstring'''
if isinstance(_lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
lowercase__ : Union[str, Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
lowercase__ : int = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
lowercase__ : str = np.concatenate(_lowerCAmelCase , axis=0 )
lowercase__ : Union[str, Any] = np.array(_lowerCAmelCase ).astype(np.floataa ) / 2_5_5.0
lowercase__ : Union[str, Any] = image.transpose(0 , 3 , 1 , 2 )
lowercase__ : Optional[int] = 2.0 * image - 1.0
lowercase__ : Union[str, Any] = torch.from_numpy(_lowerCAmelCase )
elif isinstance(image[0] , torch.Tensor ):
lowercase__ : Optional[int] = torch.cat(_lowerCAmelCase , dim=0 )
return image
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple=0.9_9_9_5 ):
'''simple docstring'''
if not isinstance(_lowerCAmelCase , np.ndarray ):
lowercase__ : Optional[int] = True
lowercase__ : str = va.device
lowercase__ : Tuple = va.cpu().numpy()
lowercase__ : Any = va.cpu().numpy()
lowercase__ : Dict = np.sum(va * va / (np.linalg.norm(_lowerCAmelCase ) * np.linalg.norm(_lowerCAmelCase )) )
if np.abs(_lowerCAmelCase ) > DOT_THRESHOLD:
lowercase__ : Optional[int] = (1 - t) * va + t * va
else:
lowercase__ : Dict = np.arccos(_lowerCAmelCase )
lowercase__ : Optional[Any] = np.sin(_lowerCAmelCase )
lowercase__ : Union[str, Any] = theta_a * t
lowercase__ : List[Any] = np.sin(_lowerCAmelCase )
lowercase__ : str = np.sin(theta_a - theta_t ) / sin_theta_a
lowercase__ : Any = sin_theta_t / sin_theta_a
lowercase__ : Optional[Any] = sa * va + sa * va
if inputs_are_torch:
lowercase__ : List[str] = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase )
return va
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
'''simple docstring'''
lowercase__ : Any = F.normalize(_lowerCAmelCase , dim=-1 )
lowercase__ : Dict = F.normalize(_lowerCAmelCase , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
for param in model.parameters():
lowercase__ : Optional[int] = value
class UpperCAmelCase_ ( _a):
def __init__( self , a , a , a , a , a , a , a , a=None , a=None , a=None , ) -> Any:
super().__init__()
self.register_modules(
vae=a , text_encoder=a , clip_model=a , tokenizer=a , unet=a , scheduler=a , feature_extractor=a , coca_model=a , coca_tokenizer=a , coca_transform=a , )
lowercase__ : Tuple = (
feature_extractor.size
if isinstance(feature_extractor.size , a )
else feature_extractor.size['shortest_edge']
)
lowercase__ : Optional[int] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , a )
set_requires_grad(self.clip_model , a )
def _UpperCAmelCase ( self , a = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__ : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a )
def _UpperCAmelCase ( self ) -> Dict:
self.enable_attention_slicing(a )
def _UpperCAmelCase ( self ) -> Any:
set_requires_grad(self.vae , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
set_requires_grad(self.vae , a )
def _UpperCAmelCase ( self ) -> int:
set_requires_grad(self.unet , a )
def _UpperCAmelCase ( self ) -> Any:
set_requires_grad(self.unet , a )
def _UpperCAmelCase ( self , a , a , a ) -> int:
# get the original timestep using init_timestep
lowercase__ : int = min(int(num_inference_steps * strength ) , a )
lowercase__ : Optional[Any] = max(num_inference_steps - init_timestep , 0 )
lowercase__ : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _UpperCAmelCase ( self , a , a , a , a , a , a=None ) -> Tuple:
if not isinstance(a , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(a )}""" )
lowercase__ : str = image.to(device=a , dtype=a )
if isinstance(a , a ):
lowercase__ : Optional[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
lowercase__ : Tuple = torch.cat(a , dim=0 )
else:
lowercase__ : Any = self.vae.encode(a ).latent_dist.sample(a )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowercase__ : int = 0.18_215 * init_latents
lowercase__ : int = init_latents.repeat_interleave(a , dim=0 )
lowercase__ : Dict = randn_tensor(init_latents.shape , generator=a , device=a , dtype=a )
# get latents
lowercase__ : int = self.scheduler.add_noise(a , a , a )
lowercase__ : List[Any] = init_latents
return latents
def _UpperCAmelCase ( self , a ) -> List[str]:
lowercase__ : List[Any] = self.coca_transform(a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
lowercase__ : Any = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
lowercase__ : Optional[int] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def _UpperCAmelCase ( self , a , a ) -> List[Any]:
lowercase__ : int = self.feature_extractor.preprocess(a )
lowercase__ : Optional[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
lowercase__ : List[str] = self.clip_model.get_image_features(a )
lowercase__ : Tuple = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a )
lowercase__ : Optional[Any] = image_embeddings_clip.repeat_interleave(a , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _UpperCAmelCase ( self , a , a , a , a , a , a , a , ) -> str:
lowercase__ : Tuple = latents.detach().requires_grad_()
lowercase__ : Dict = self.scheduler.scale_model_input(a , a )
# predict the noise residual
lowercase__ : int = self.unet(a , a , encoder_hidden_states=a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
lowercase__ : int = self.scheduler.alphas_cumprod[timestep]
lowercase__ : Tuple = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase__ : Any = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
lowercase__ : List[str] = torch.sqrt(a )
lowercase__ : Any = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , a ):
lowercase__ : Dict = self.scheduler.sigmas[index]
lowercase__ : List[str] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowercase__ : str = 1 / 0.18_215 * sample
lowercase__ : int = self.vae.decode(a ).sample
lowercase__ : int = (image / 2 + 0.5).clamp(0 , 1 )
lowercase__ : Dict = transforms.Resize(self.feature_extractor_size )(a )
lowercase__ : Optional[int] = self.normalize(a ).to(latents.dtype )
lowercase__ : str = self.clip_model.get_image_features(a )
lowercase__ : Tuple = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a )
lowercase__ : Dict = spherical_dist_loss(a , a ).mean() * clip_guidance_scale
lowercase__ : Tuple = -torch.autograd.grad(a , a )[0]
if isinstance(self.scheduler , a ):
lowercase__ : int = latents.detach() + grads * (sigma**2)
lowercase__ : Optional[Any] = noise_pred_original
else:
lowercase__ : List[Any] = noise_pred_original - torch.sqrt(a ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , a , a , a = None , a = None , a = 5_1_2 , a = 5_1_2 , a = 0.6 , a = 5_0 , a = 7.5 , a = 1 , a = 0.0 , a = 1_0_0 , a = None , a = "pil" , a = True , a = 0.8 , a = 0.1 , a = 0.1 , ) -> Dict:
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(a )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(a , torch.Generator ) and batch_size > 1:
lowercase__ : Optional[int] = [generator] + [None] * (batch_size - 1)
lowercase__ : Tuple = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
lowercase__ : Optional[Any] = [x[0] for x in coca_is_none if x[1]]
lowercase__ : Dict = ', '.join(a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(a ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
lowercase__ : int = self.get_image_description(a )
if style_prompt is None:
if len(a ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
lowercase__ : List[str] = self.get_image_description(a )
# get prompt text embeddings for content and style
lowercase__ : Any = self.tokenizer(
a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors='pt' , )
lowercase__ : Optional[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowercase__ : Tuple = self.tokenizer(
a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors='pt' , )
lowercase__ : Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowercase__ : Union[str, Any] = slerp(a , a , a )
# duplicate text embeddings for each generation per prompt
lowercase__ : Any = text_embeddings.repeat_interleave(a , dim=0 )
# set timesteps
lowercase__ : List[str] = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowercase__ : Optional[Any] = {}
if accepts_offset:
lowercase__ : Dict = 1
self.scheduler.set_timesteps(a , **a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
lowercase__ , lowercase__ : Optional[int] = self.get_timesteps(a , a , self.device )
lowercase__ : List[Any] = timesteps[:1].repeat(a )
# Preprocess image
lowercase__ : Tuple = preprocess(a , a , a )
lowercase__ : int = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
lowercase__ : List[Any] = preprocess(a , a , a )
lowercase__ : Union[str, Any] = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
lowercase__ : str = slerp(a , a , a )
if clip_guidance_scale > 0:
lowercase__ : List[str] = self.get_clip_image_embeddings(a , a )
lowercase__ : Any = self.get_clip_image_embeddings(a , a )
lowercase__ : Dict = slerp(
a , a , a )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ : Tuple = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ : Optional[int] = content_text_input.input_ids.shape[-1]
lowercase__ : Optional[Any] = self.tokenizer([''] , padding='max_length' , max_length=a , return_tensors='pt' )
lowercase__ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowercase__ : Optional[int] = uncond_embeddings.repeat_interleave(a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowercase__ : Tuple = torch.randn(a , generator=a , device='cpu' , dtype=a ).to(
self.device )
else:
lowercase__ : Any = torch.randn(a , generator=a , device=self.device , dtype=a )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ : Tuple = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ : Any = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ : Optional[int] = {}
if accepts_eta:
lowercase__ : str = eta
# check if the scheduler accepts generator
lowercase__ : List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowercase__ : str = generator
with self.progress_bar(total=a ):
for i, t in enumerate(a ):
# expand the latents if we are doing classifier free guidance
lowercase__ : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ : Dict = self.scheduler.scale_model_input(a , a )
# predict the noise residual
lowercase__ : List[Any] = self.unet(a , a , encoder_hidden_states=a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ : List[str] = noise_pred.chunk(2 )
lowercase__ : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowercase__ : int = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowercase__ , lowercase__ : int = self.cond_fn(
a , a , a , a , a , a , a , )
# compute the previous noisy sample x_t -> x_t-1
lowercase__ : Optional[Any] = self.scheduler.step(a , a , a , **a ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowercase__ : int = 1 / 0.18_215 * latents
lowercase__ : List[Any] = self.vae.decode(a ).sample
lowercase__ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
lowercase__ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : Tuple = self.numpy_to_pil(a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=a , nsfw_content_detected=a )
| 645
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Any = [0] * len(_lowerCAmelCase )
for i in range(1 , len(_lowerCAmelCase ) ):
# use last results for better performance - dynamic programming
lowercase__ : List[str] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowercase__ : Dict = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowercase__ : Union[str, Any] = j
return prefix_result
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
return max(prefix_function(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
| 1
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_UpperCamelCase : Optional[int] = logging.get_logger(__name__)
_UpperCamelCase : Dict = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : str = "deformable_detr"
lowerCamelCase__ : List[Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , a=True , a=None , a=3 , a=3_0_0 , a=1_0_2_4 , a=6 , a=1_0_2_4 , a=8 , a=6 , a=1_0_2_4 , a=8 , a=0.0 , a=True , a="relu" , a=2_5_6 , a=0.1 , a=0.0 , a=0.0 , a=0.02 , a=1.0 , a=True , a=False , a="sine" , a="resnet50" , a=True , a=False , a=4 , a=4 , a=4 , a=False , a=3_0_0 , a=False , a=1 , a=5 , a=2 , a=1 , a=1 , a=5 , a=2 , a=0.1 , a=0.25 , a=False , **a , ) -> Any:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowercase__ : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(a , a ):
lowercase__ : Union[str, Any] = backbone_config.get('model_type' )
lowercase__ : List[Any] = CONFIG_MAPPING[backbone_model_type]
lowercase__ : Union[str, Any] = config_class.from_dict(a )
lowercase__ : List[Any] = use_timm_backbone
lowercase__ : int = backbone_config
lowercase__ : Optional[int] = num_channels
lowercase__ : Optional[Any] = num_queries
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = d_model
lowercase__ : List[str] = encoder_ffn_dim
lowercase__ : List[str] = encoder_layers
lowercase__ : List[Any] = encoder_attention_heads
lowercase__ : str = decoder_ffn_dim
lowercase__ : Optional[Any] = decoder_layers
lowercase__ : List[Any] = decoder_attention_heads
lowercase__ : Optional[Any] = dropout
lowercase__ : int = attention_dropout
lowercase__ : Union[str, Any] = activation_dropout
lowercase__ : Tuple = activation_function
lowercase__ : Optional[int] = init_std
lowercase__ : List[str] = init_xavier_std
lowercase__ : Tuple = encoder_layerdrop
lowercase__ : List[str] = auxiliary_loss
lowercase__ : Tuple = position_embedding_type
lowercase__ : Tuple = backbone
lowercase__ : List[Any] = use_pretrained_backbone
lowercase__ : List[Any] = dilation
# deformable attributes
lowercase__ : Dict = num_feature_levels
lowercase__ : Optional[Any] = encoder_n_points
lowercase__ : Union[str, Any] = decoder_n_points
lowercase__ : List[Any] = two_stage
lowercase__ : str = two_stage_num_proposals
lowercase__ : Optional[int] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
lowercase__ : int = class_cost
lowercase__ : List[Any] = bbox_cost
lowercase__ : Optional[Any] = giou_cost
# Loss coefficients
lowercase__ : List[Any] = mask_loss_coefficient
lowercase__ : List[Any] = dice_loss_coefficient
lowercase__ : Tuple = bbox_loss_coefficient
lowercase__ : int = giou_loss_coefficient
lowercase__ : Any = eos_coefficient
lowercase__ : str = focal_alpha
lowercase__ : Any = disable_custom_kernels
super().__init__(is_encoder_decoder=a , **a )
@property
def _UpperCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def _UpperCAmelCase ( self ) -> int:
return self.d_model
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowercase__ : Optional[Any] = self.backbone_config.to_dict()
lowercase__ : Optional[Any] = self.__class__.model_type
return output
| 645
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]:
lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0}
lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
lowercase__ : Optional[int] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : str = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = min_resolution
lowercase__ : int = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : str = do_center_crop
lowercase__ : List[Any] = crop_size
lowercase__ : Union[str, Any] = do_flip_channel_order
def _UpperCAmelCase ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Tuple = MobileViTImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_center_crop' ) )
self.assertTrue(hasattr(a , 'center_crop' ) )
self.assertTrue(hasattr(a , 'do_flip_channel_order' ) )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processing
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 645
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCamelCase : Tuple = {
"google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Optional[int] = "canine"
def __init__( self , a=7_6_8 , a=1_2 , a=1_2 , a=3_0_7_2 , a="gelu" , a=0.1 , a=0.1 , a=1_6_3_8_4 , a=1_6 , a=0.02 , a=1e-12 , a=0 , a=0XE_000 , a=0XE_001 , a=4 , a=4 , a=8 , a=1_6_3_8_4 , a=1_2_8 , **a , ) -> Optional[int]:
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a )
lowercase__ : str = max_position_embeddings
lowercase__ : Optional[int] = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : Dict = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : Any = hidden_act
lowercase__ : Optional[int] = hidden_dropout_prob
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : Dict = initializer_range
lowercase__ : List[str] = type_vocab_size
lowercase__ : Union[str, Any] = layer_norm_eps
# Character config:
lowercase__ : Any = downsampling_rate
lowercase__ : List[str] = upsampling_kernel_size
lowercase__ : Optional[int] = num_hash_functions
lowercase__ : Optional[Any] = num_hash_buckets
lowercase__ : Optional[int] = local_transformer_stride
| 645
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict:
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : int = is_training
lowercase__ : str = use_attention_mask
lowercase__ : Dict = use_token_type_ids
lowercase__ : Optional[int] = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : int = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Optional[int] = num_choices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_attention_mask:
lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[str] = None
if self.use_token_type_ids:
lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Any = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_class_name in self.all_model_classes:
lowercase__ : str = model_class_name.from_pretrained('albert-base-v2' )
lowercase__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' )
lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ : Any = model(a , attention_mask=a )[0]
lowercase__ : Tuple = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , a )
lowercase__ : Optional[Any] = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_UpperCamelCase : int = logging.get_logger(__name__)
_UpperCamelCase : Optional[int] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_UpperCamelCase : str = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
_UpperCamelCase : Optional[int] = {
"yjernite/retribert-base-uncased": 5_12,
}
_UpperCamelCase : Any = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Any = VOCAB_FILES_NAMES
lowerCamelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : List[Any] = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ : Any = RetriBertTokenizer
lowerCamelCase__ : Optional[Any] = ["input_ids", "attention_mask"]
def __init__( self , a=None , a=None , a=True , a="[UNK]" , a="[SEP]" , a="[PAD]" , a="[CLS]" , a="[MASK]" , a=True , a=None , **a , ) -> Union[str, Any]:
super().__init__(
a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
lowercase__ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , a ) != do_lower_case
or normalizer_state.get('strip_accents' , a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , a ) != tokenize_chinese_chars
):
lowercase__ : str = getattr(a , normalizer_state.pop('type' ) )
lowercase__ : List[Any] = do_lower_case
lowercase__ : str = strip_accents
lowercase__ : int = tokenize_chinese_chars
lowercase__ : Optional[int] = normalizer_class(**a )
lowercase__ : Dict = do_lower_case
def _UpperCAmelCase ( self , a , a=None ) -> Optional[Any]:
lowercase__ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCAmelCase ( self , a , a = None ) -> List[int]:
lowercase__ : int = [self.sep_token_id]
lowercase__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self , a , a = None ) -> Tuple[str]:
lowercase__ : List[str] = self._tokenizer.model.save(a , name=a )
return tuple(a )
| 645
|
"""simple docstring"""
from collections.abc import Sequence
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
lowercase__ : int = 0.0
for coeff in reversed(_lowerCAmelCase ):
lowercase__ : List[Any] = result * x + coeff
return result
if __name__ == "__main__":
_UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0)
_UpperCamelCase : Dict = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 645
| 1
|
"""simple docstring"""
from __future__ import annotations
from random import choice
def a_ ( _lowerCAmelCase : List[Any] ):
'''simple docstring'''
return choice(_lowerCAmelCase )
def a_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Optional[Any] = random_pivot(_lowerCAmelCase )
# partition based on pivot
# linear time
lowercase__ : Optional[int] = [e for e in lst if e < pivot]
lowercase__ : str = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(_lowerCAmelCase ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(_lowerCAmelCase ) < k - 1:
return kth_number(_lowerCAmelCase , k - len(_lowerCAmelCase ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
_UpperCamelCase : Any = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def a_ ( _lowerCAmelCase : Optional[Any]=True ):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a))
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[Any] = None
def _UpperCAmelCase ( self , a , a ) -> List[Any]:
with TemporaryDirectory() as tmp_dir:
lowercase__ : List[str] = dataset_module_factory(a , cache_dir=a )
lowercase__ : List[Any] = import_main_class(dataset_module.module_path , dataset=a )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
lowercase__ : Union[str, Any] = cached_path(a , cache_dir=a )
self.assertTrue(os.path.exists(a ) )
@pytest.mark.integration
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
lowercase__ : int = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : Optional[int] = import_main_class(dataset_module.module_path )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
lowercase__ : Optional[int] = None
builder_instance.download_and_prepare()
lowercase__ : Optional[int] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert "train" in ds
assert isinstance(ds['train'] , _lowerCAmelCase )
assert next(iter(ds['train'] ) )
| 645
| 1
|
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCAmelCase_ :
lowerCamelCase__ : List[Any] = None
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
lowercase__ : Union[str, Any] = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : Any = os.path.join(a , 'feat_extract.json' )
feat_extract_first.to_json_file(a )
lowercase__ : List[str] = self.feature_extraction_class.from_json_file(a )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : List[str] = feat_extract_first.save_pretrained(a )[0]
check_json_file_has_correct_format(a )
lowercase__ : Tuple = self.feature_extraction_class.from_pretrained(a )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = self.feature_extraction_class()
self.assertIsNotNone(a )
| 645
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a_ ( _lowerCAmelCase : dict ):
'''simple docstring'''
return (data["data"], data["target"])
def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ):
'''simple docstring'''
lowercase__ : Any = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(_lowerCAmelCase , _lowerCAmelCase )
# Predict target for test data
lowercase__ : str = xgb.predict(_lowerCAmelCase )
lowercase__ : Union[str, Any] = predictions.reshape(len(_lowerCAmelCase ) , 1 )
return predictions
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[Any] = fetch_california_housing()
lowercase__ , lowercase__ : str = data_handling(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = train_test_split(
_lowerCAmelCase , _lowerCAmelCase , test_size=0.2_5 , random_state=1 )
lowercase__ : Any = xgboost(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
print(f"""Mean Square Error : {mean_squared_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 645
| 1
|
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
lowercase__ : Union[str, Any] = int(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ : Tuple = t // 3600, (t // 60) % 60, t % 60
return f"""{h}:{m:02d}:{s:02d}""" if h != 0 else f"""{m:02d}:{s:02d}"""
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any]=300 ):
'''simple docstring'''
return f"""
<div>
{prefix}
<progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
{label}
</div>
"""
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : List[Any] = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f""" <th>{i}</th>\n"""
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
lowercase__ : Dict = f"""{elt:.6f}""" if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else str(_lowerCAmelCase )
html_code += f""" <td>{elt}</td>\n"""
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class UpperCAmelCase_ :
lowerCamelCase__ : Tuple = 5
lowerCamelCase__ : Optional[Any] = 0.2
def __init__( self , a , a = None , a = True , a = None , a = 3_0_0 , ) -> Union[str, Any]:
lowercase__ : Optional[int] = total
lowercase__ : Union[str, Any] = '' if prefix is None else prefix
lowercase__ : Optional[int] = leave
lowercase__ : Union[str, Any] = parent
lowercase__ : int = width
lowercase__ : Optional[Any] = None
lowercase__ : Dict = None
lowercase__ : List[Any] = None
def _UpperCAmelCase ( self , a , a = False , a = None ) -> Optional[int]:
lowercase__ : List[Any] = value
if comment is not None:
lowercase__ : Optional[int] = comment
if self.last_value is None:
lowercase__ : Dict = time.time()
lowercase__ : Optional[Any] = value
lowercase__ : Any = None
lowercase__ : Union[str, Any] = self.warmup
lowercase__ : List[str] = 1
self.update_bar(a )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
lowercase__ : Union[str, Any] = time.time()
lowercase__ : Any = current_time - self.start_time
# We could have value = self.start_value if the update is called twixe with the same start value.
if value > self.start_value:
lowercase__ : Optional[int] = self.elapsed_time / (value - self.start_value)
else:
lowercase__ : Tuple = None
if value >= self.total:
lowercase__ : str = self.total
lowercase__ : int = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
lowercase__ : Tuple = self.average_time_per_item * (self.total - value)
self.update_bar(a )
lowercase__ : Any = value
lowercase__ : str = current_time
if self.average_time_per_item is None:
lowercase__ : List[str] = 1
else:
lowercase__ : Optional[Any] = max(int(self.update_every / self.average_time_per_item ) , 1 )
def _UpperCAmelCase ( self , a , a=None ) -> Optional[int]:
lowercase__ : Optional[Any] = ' ' * (len(str(self.total ) ) - len(str(a ) )) + str(a )
if self.elapsed_time is None:
lowercase__ : Optional[int] = f"""[{spaced_value}/{self.total} : < :"""
elif self.predicted_remaining is None:
lowercase__ : Optional[Any] = f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
else:
lowercase__ : Optional[int] = (
f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
f""" {format_time(self.predicted_remaining )}"""
)
self.label += f""", {1/self.average_time_per_item:.2f} it/s"""
self.label += "]" if self.comment is None or len(self.comment ) == 0 else f""", {self.comment}]"""
self.display()
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[str] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
lowercase__ : Union[str, Any] = disp.display(disp.HTML(self.html_code ) , display_id=a )
else:
self.output.update(disp.HTML(self.html_code ) )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('' ) )
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=None ) -> Tuple:
super().__init__(a )
lowercase__ : Union[str, Any] = None if column_names is None else [column_names]
lowercase__ : Optional[int] = None
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Any = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
lowercase__ : str = disp.display(disp.HTML(self.html_code ) , display_id=a )
else:
self.output.update(disp.HTML(self.html_code ) )
def _UpperCAmelCase ( self , a ) -> str:
if self.inner_table is None:
lowercase__ : Optional[int] = [list(values.keys() ), list(values.values() )]
else:
lowercase__ : List[str] = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(a )
lowercase__ : List[str] = columns
self.inner_table.append([values[c] for c in columns] )
def _UpperCAmelCase ( self , a , a=None , a=3_0_0 ) -> Union[str, Any]:
lowercase__ : Any = NotebookProgressBar(a , prefix=a , parent=self , width=a )
return self.child_bar
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : str = None
self.display()
class UpperCAmelCase_ ( _a):
def __init__( self ) -> Any:
lowercase__ : Tuple = None
lowercase__ : Any = None
lowercase__ : List[Any] = False
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
lowercase__ : Tuple = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
lowercase__ : int = 0
lowercase__ : Tuple = 0
lowercase__ : Tuple = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss' )
lowercase__ : Optional[int] = NotebookTrainingTracker(state.max_steps , a )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]:
lowercase__ : int = int(state.epoch ) if int(state.epoch ) == state.epoch else f"""{state.epoch:.2f}"""
self.training_tracker.update(
state.global_step + 1 , comment=f"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
lowercase__ : int = False
def _UpperCAmelCase ( self , a , a , a , a=None , **a ) -> Optional[Any]:
if not has_length(a ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
lowercase__ : List[Any] = self.training_tracker.add_child(len(a ) )
else:
lowercase__ : Any = NotebookProgressBar(len(a ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple:
if self.prediction_bar is not None:
self.prediction_bar.close()
lowercase__ : str = None
def _UpperCAmelCase ( self , a , a , a , a=None , **a ) -> List[Any]:
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
lowercase__ : Any = {'Training Loss': logs['loss']}
# First column is necessarily Step sine we're not in epoch eval strategy
lowercase__ : Optional[Any] = state.global_step
self.training_tracker.write_line(a )
def _UpperCAmelCase ( self , a , a , a , a=None , **a ) -> List[str]:
if self.training_tracker is not None:
lowercase__ : List[Any] = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history ):
if "loss" in log:
lowercase__ : str = log['loss']
break
if self.first_column == "Epoch":
lowercase__ : Union[str, Any] = int(state.epoch )
else:
lowercase__ : str = state.global_step
lowercase__ : Optional[int] = 'eval'
for k in metrics:
if k.endswith('_loss' ):
lowercase__ : Dict = re.sub(R'\_loss$' , '' , a )
lowercase__ : Dict = metrics.pop('total_flos' , a )
lowercase__ : str = metrics.pop('epoch' , a )
lowercase__ : Any = metrics.pop(f"""{metric_key_prefix}_runtime""" , a )
lowercase__ : Optional[int] = metrics.pop(f"""{metric_key_prefix}_samples_per_second""" , a )
lowercase__ : Tuple = metrics.pop(f"""{metric_key_prefix}_steps_per_second""" , a )
lowercase__ : Optional[Any] = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""" , a )
for k, v in metrics.items():
if k == f"""{metric_key_prefix}_loss""":
lowercase__ : List[Any] = v
else:
lowercase__ : Any = k.split('_' )
lowercase__ : str = ' '.join([part.capitalize() for part in splits[1:]] )
lowercase__ : Optional[int] = v
self.training_tracker.write_line(a )
self.training_tracker.remove_child()
lowercase__ : Optional[int] = None
# Evaluation takes a long time so we should force the next update.
lowercase__ : List[str] = True
def _UpperCAmelCase ( self , a , a , a , **a ) -> str:
self.training_tracker.update(
state.global_step , comment=f"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=a )
lowercase__ : Optional[Any] = None
| 645
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]:
lowercase__ : str = parent
lowercase__ : int = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Dict = patch_size
lowercase__ : Tuple = tubelet_size
lowercase__ : Optional[int] = num_frames
lowercase__ : Optional[int] = is_training
lowercase__ : int = use_labels
lowercase__ : Optional[int] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = mask_ratio
lowercase__ : Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase__ : Optional[Any] = (image_size // patch_size) ** 2
lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase__ : str = int(mask_ratio * self.seq_length )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Dict = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Tuple:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : Dict = VideoMAEModel(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = VideoMAEForPreTraining(a )
model.to(a )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Any = torch.ones((self.num_masks,) )
lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool()
lowercase__ : str = model(a , a )
# model only returns predictions for masked patches
lowercase__ : str = mask.sum().item()
lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCamelCase__ : Optional[int] = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : str = False
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = VideoMAEModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]:
lowercase__ : Union[str, Any] = copy.deepcopy(a )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) )
lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowercase__ : Union[str, Any] = bool_masked_pos.to(a )
if return_labels:
if model_class in [
*get_values(a ),
]:
lowercase__ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Dict:
pass
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
if not self.has_attentions:
pass
else:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = True
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase__ : Optional[Any] = True
lowercase__ : int = False
lowercase__ : Any = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Dict = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : List[str] = len(a )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : List[str] = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
lowercase__ : int = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> Optional[int]:
def check_hidden_states_output(a , a , a ):
lowercase__ : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a ) , a )
lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Tuple = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(a , a , a )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
def a_ ( ):
'''simple docstring'''
lowercase__ : int = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
lowercase__ : str = np.load(_lowerCAmelCase )
return list(_lowerCAmelCase )
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
a )
lowercase__ : str = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**a )
# verify the logits
lowercase__ : str = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a )
# add boolean mask, indicating which patches to mask
lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
lowercase__ : str = torch.load(a )
# forward pass
with torch.no_grad():
lowercase__ : List[Any] = model(**a )
# verify the logits
lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowercase__ : List[str] = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a )
self.assertEqual(outputs.logits.shape , a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to(
a )
with torch.no_grad():
lowercase__ : Any = model(**a )
lowercase__ : List[Any] = torch.tensor(torch.tensor([0.6_469] ) , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : list ):
'''simple docstring'''
if len(_lowerCAmelCase ) <= 1:
return lst
lowercase__ : Dict = 1
while i < len(_lowerCAmelCase ):
if lst[i - 1] <= lst[i]:
i += 1
else:
lowercase__ , lowercase__ : Optional[int] = lst[i], lst[i - 1]
i -= 1
if i == 0:
lowercase__ : int = 1
return lst
if __name__ == "__main__":
_UpperCamelCase : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
_UpperCamelCase : List[str] = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 645
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCamelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
for attribute in key.split('.' ):
lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
lowercase__ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowercase__ : Dict = value
elif weight_type == "weight_v":
lowercase__ : List[str] = value
elif weight_type == "bias":
lowercase__ : Optional[Any] = value
else:
lowercase__ : List[str] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = []
lowercase__ : List[str] = fairseq_model.state_dict()
lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ : int = True
if "*" in mapped_key:
lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' )[-2]
lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
lowercase__ : List[Any] = 'weight_g'
elif "weight_v" in name:
lowercase__ : int = 'weight_v'
elif "bias" in name:
lowercase__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = 'weight'
else:
lowercase__ : int = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = full_name.split('conv_layers.' )[-1]
lowercase__ : int = name.split('.' )
lowercase__ : int = int(items[0] )
lowercase__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase )
else:
lowercase__ : Any = UniSpeechSatConfig()
lowercase__ : Union[str, Any] = ''
if is_finetuned:
lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase )
else:
lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCamelCase : str = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 645
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , ) -> Optional[Any]:
lowercase__ : str = size if size is not None else {'height': 1_8, 'width': 1_8}
lowercase__ : int = parent
lowercase__ : List[Any] = batch_size
lowercase__ : Optional[int] = num_channels
lowercase__ : Union[str, Any] = image_size
lowercase__ : List[Any] = min_resolution
lowercase__ : Any = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : Union[str, Any] = apply_ocr
def _UpperCAmelCase ( self ) -> Any:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = LayoutLMvaImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'apply_ocr' ) )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 1_8, 'width': 1_8} )
lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} )
def _UpperCAmelCase ( self ) -> Dict:
pass
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# Initialize image_processing
lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ : Dict = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , a )
self.assertIsInstance(encoding.boxes , a )
# Test batched
lowercase__ : Optional[int] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processing
lowercase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ : Optional[int] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ : List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ : List[str] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _UpperCAmelCase ( self ) -> List[str]:
# with apply_OCR = True
lowercase__ : List[str] = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowercase__ : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
lowercase__ : List[Any] = Image.open(ds[0]['file'] ).convert('RGB' )
lowercase__ : Dict = image_processing(a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowercase__ : str = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
lowercase__ : Any = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , a )
self.assertListEqual(encoding.boxes , a )
# with apply_OCR = False
lowercase__ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a )
lowercase__ : Dict = image_processing(a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
| 645
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int:
lowercase__ : int = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Dict = image_size
lowercase__ : str = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = embed_dim
lowercase__ : Any = depths
lowercase__ : Dict = num_heads
lowercase__ : List[str] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Tuple = qkv_bias
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = drop_path_rate
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[Any] = use_absolute_embeddings
lowercase__ : Optional[Any] = patch_norm
lowercase__ : Any = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : List[str] = is_training
lowercase__ : int = scope
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : List[str] = encoder_stride
lowercase__ : Optional[Any] = out_features
lowercase__ : Dict = out_indices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Tuple = MaskFormerSwinModel(config=a )
model.to(a )
model.eval()
lowercase__ : str = model(a )
lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(a ):
lowercase__ : Dict = ['stem']
lowercase__ : List[str] = MaskFormerSwinBackbone(config=a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase__ : str = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = MaskFormerSwinModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> str:
return
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a )
@unittest.skip('Swin does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
lowercase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple:
lowercase__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[Any] = outputs.hidden_states
lowercase__ : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swin has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = 3
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : int = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(a ):
lowercase__ : Union[str, Any] = 0
return t
def check_equivalence(a , a , a , a={} ):
with torch.no_grad():
lowercase__ : Optional[Any] = model(**a , return_dict=a , **a )
lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple()
def recursive_check(a , a ):
if isinstance(a , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a , a ):
recursive_check(a , a )
elif isinstance(a , a ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(a , a )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has"""
f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}."""
) , )
recursive_check(a , a )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
model.to(a )
model.eval()
lowercase__ : Tuple = self._prepare_for_class(a , a )
lowercase__ : Optional[Any] = self._prepare_for_class(a , a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a )
lowercase__ : int = self._prepare_for_class(a , a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _a):
lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[int] = MaskFormerSwinModelTester(self )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
lowercase__ : Optional[Any] = backbone_class(a )
backbone.to(a )
backbone.eval()
lowercase__ : Union[str, Any] = backbone(**a )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , a )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowercase__ : List[str] = backbone(**a , output_hidden_states=a )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowercase__ , lowercase__ , lowercase__ : int = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowercase__ : List[Any] = backbone(**a , output_attentions=a )
self.assertIsNotNone(outputs.attentions )
| 645
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_UpperCamelCase : Optional[int] = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
def __init__( self , *a , **a ) -> None:
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' , a , )
super().__init__(*a , **a )
| 645
|
"""simple docstring"""
import math
def a_ ( _lowerCAmelCase : int = 100 ):
'''simple docstring'''
lowercase__ : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) )
lowercase__ : str = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
| 645
| 1
|
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_UpperCamelCase : Any = 16
_UpperCamelCase : Tuple = 32
def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 ):
'''simple docstring'''
lowercase__ : List[str] = AutoTokenizer.from_pretrained('bert-base-cased' )
lowercase__ : int = load_dataset('glue' , 'mrpc' )
def tokenize_function(_lowerCAmelCase : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : List[Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Dict = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCAmelCase : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : Dict = 16
elif accelerator.mixed_precision != "no":
lowercase__ : List[Any] = 8
else:
lowercase__ : List[str] = None
return tokenizer.pad(
_lowerCAmelCase , padding='longest' , max_length=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_tensors='pt' , )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : Tuple = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_UpperCamelCase : Any = mocked_dataloaders # noqa: F811
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int ):
'''simple docstring'''
if os.environ.get('TESTING_MOCKED_DATALOADERS' , _lowerCAmelCase ) == "1":
lowercase__ : int = 2
# Initialize accelerator
lowercase__ : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : Dict = config['lr']
lowercase__ : Optional[Any] = int(config['num_epochs'] )
lowercase__ : List[Any] = int(config['seed'] )
lowercase__ : Dict = int(config['batch_size'] )
lowercase__ : str = evaluate.load('glue' , 'mrpc' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=_lowerCAmelCase )
def inner_training_loop(_lowerCAmelCase : List[Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(_lowerCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : Optional[Any] = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=_lowerCAmelCase )
lowercase__ , lowercase__ : int = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase )
# Instantiate scheduler
lowercase__ : int = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Now we train the model
for epoch in range(_lowerCAmelCase ):
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : Union[str, Any] = model(**_lowerCAmelCase )
lowercase__ : Tuple = outputs.loss
accelerator.backward(_lowerCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : str = model(**_lowerCAmelCase )
lowercase__ : List[Any] = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : List[str] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=_lowerCAmelCase , references=_lowerCAmelCase , )
lowercase__ : Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , _lowerCAmelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[Any] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=_lowerCAmelCase , default=_lowerCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
lowercase__ : Optional[int] = parser.parse_args()
lowercase__ : Any = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 645
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : List[Any] = controlnet_params
lowercase__ : int = 'bird'
lowercase__ : List[Any] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
lowercase__ : List[Any] = jax.random.PRNGKey(0 )
lowercase__ : Tuple = jax.random.split(a , jax.device_count() )
lowercase__ : str = replicate(a )
lowercase__ : List[str] = shard(a )
lowercase__ : Dict = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : Optional[Any] = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : Optional[Any] = controlnet_params
lowercase__ : List[Any] = 'Chef in the kitchen'
lowercase__ : List[str] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
lowercase__ : List[str] = jax.random.PRNGKey(0 )
lowercase__ : str = jax.random.split(a , jax.device_count() )
lowercase__ : Optional[Any] = replicate(a )
lowercase__ : Optional[Any] = shard(a )
lowercase__ : List[Any] = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : str = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 645
| 1
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
_UpperCamelCase : Optional[Any] = ["gpt2"]
_UpperCamelCase : List[Any] = "gpt2"
if is_tf_available():
class UpperCAmelCase_ ( tf.Module):
def __init__( self , a ) -> List[Any]:
super().__init__()
lowercase__ : Dict = tokenizer
lowercase__ : Any = AutoConfig.from_pretrained(a )
lowercase__ : Union[str, Any] = TFGPTaLMHeadModel.from_config(a )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def _UpperCAmelCase ( self , a ) -> Union[str, Any]:
lowercase__ : List[Any] = self.tokenizer(a )
lowercase__ : Tuple = tokenized['input_ids'].to_tensor()
lowercase__ : List[str] = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
lowercase__ : Optional[Any] = self.model(input_ids=a , attention_mask=a )['logits']
return outputs
@require_tf
@require_keras_nlp
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> List[Any]:
super().setUp()
lowercase__ : List[Any] = [GPTaTokenizer.from_pretrained(a ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
lowercase__ : int = [TFGPTaTokenizer.from_pretrained(a ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
lowercase__ : Optional[Any] = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
lowercase__ : Union[str, Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _UpperCAmelCase ( self ) -> List[Any]:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
lowercase__ : str = tokenizer([test_inputs] , return_tensors='tf' )
lowercase__ : List[str] = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
lowercase__ : Optional[int] = python_outputs[key].numpy()
lowercase__ : List[str] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(a , tf.intaa ) == tf_outputs_values ) )
@slow
def _UpperCAmelCase ( self ) -> int:
for tf_tokenizer in self.tf_tokenizers:
lowercase__ : Union[str, Any] = tf.function(a )
for test_inputs in self.test_sentences:
lowercase__ : List[str] = tf.constant(a )
lowercase__ : Tuple = compiled_tokenizer(a )
lowercase__ : Union[str, Any] = tf_tokenizer(a )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _UpperCAmelCase ( self ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
lowercase__ : str = ModelToSave(tokenizer=a )
lowercase__ : Tuple = tf.convert_to_tensor([self.test_sentences[0]] )
lowercase__ : Optional[int] = model.serving(a ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowercase__ : List[Any] = Path(a ) / 'saved.model'
tf.saved_model.save(a , a , signatures={'serving_default': model.serving} )
lowercase__ : int = tf.saved_model.load(a )
lowercase__ : Optional[Any] = loaded_model.signatures['serving_default'](a )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def _UpperCAmelCase ( self ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
lowercase__ : List[str] = tf.convert_to_tensor([self.test_sentences[0]] )
lowercase__ : Optional[Any] = tf_tokenizer(a ) # Build model with some sample inputs
lowercase__ : List[Any] = tf_tokenizer.get_config()
lowercase__ : Dict = TFGPTaTokenizer.from_config(a )
lowercase__ : Optional[Any] = model_from_config(a )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def _UpperCAmelCase ( self ) -> Any:
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
lowercase__ : Optional[Any] = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
lowercase__ : Tuple = tf.convert_to_tensor([self.test_sentences[0]] )
lowercase__ : List[Any] = tf_tokenizer(a , max_length=a )
lowercase__ : Optional[Any] = out['input_ids'].numpy().shape[1]
assert out_length == max_length
| 645
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 645
| 1
|
"""simple docstring"""
import math
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Tuple = [True] * n
lowercase__ : Optional[Any] = False
lowercase__ : Dict = False
lowercase__ : Dict = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
lowercase__ : int = i * 2
while index < n:
lowercase__ : Any = False
lowercase__ : List[str] = index + i
lowercase__ : Optional[Any] = [2]
for i in range(3 , _lowerCAmelCase , 2 ):
if is_prime[i]:
primes.append(_lowerCAmelCase )
return primes
def a_ ( _lowerCAmelCase : int = 9999_6666_3333 ):
'''simple docstring'''
lowercase__ : Any = math.floor(math.sqrt(_lowerCAmelCase ) ) + 100
lowercase__ : Dict = prime_sieve(_lowerCAmelCase )
lowercase__ : str = 0
lowercase__ : Tuple = 0
lowercase__ : Any = primes[prime_index]
while (last_prime**2) <= limit:
lowercase__ : Optional[Any] = primes[prime_index + 1]
lowercase__ : str = last_prime**2
lowercase__ : Dict = next_prime**2
# Get numbers divisible by lps(current)
lowercase__ : List[str] = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
lowercase__ : List[Any] = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
lowercase__ : Union[str, Any] = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
lowercase__ : str = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 645
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
lowercase__ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(a )
from datasets import load_dataset
lowercase__ : str = load_dataset('nielsr/rvlcdip-demo' )
lowercase__ : Tuple = dataset['train'][0]['image'].convert('RGB' )
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : List[str] = model(**a )
lowercase__ : List[Any] = outputs.logits
lowercase__ : Union[str, Any] = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , a )
lowercase__ : Tuple = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase : Union[str, Any] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : str = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[str] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 645
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase_ :
@staticmethod
def _UpperCAmelCase ( *a , **a ) -> int:
pass
def a_ ( _lowerCAmelCase : Image ):
'''simple docstring'''
lowercase__ : List[str] = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a )
import datasets
lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowercase__ : List[Any] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = 'Intel/dpt-large'
lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a )
lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowercase__ : Optional[Any] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 645
| 1
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase : int = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : str = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 645
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase_ ( _a):
def __init__( self ) -> Any:
lowercase__ : Tuple = []
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_init_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]:
self.events.append('on_train_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_train_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_epoch_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[Any]:
self.events.append('on_epoch_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_step_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> str:
self.events.append('on_step_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_evaluate' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple:
self.events.append('on_predict' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Union[str, Any]:
self.events.append('on_save' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_log' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_prediction_step' )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> str:
lowercase__ : str = tempfile.mkdtemp()
def _UpperCAmelCase ( self ) -> Dict:
shutil.rmtree(self.output_dir )
def _UpperCAmelCase ( self , a=0 , a=0 , a=6_4 , a=6_4 , a=None , a=False , **a ) -> int:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
lowercase__ : str = RegressionDataset(length=a )
lowercase__ : Any = RegressionDataset(length=a )
lowercase__ : Optional[Any] = RegressionModelConfig(a=a , b=a )
lowercase__ : Union[str, Any] = RegressionPreTrainedModel(a )
lowercase__ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=a , report_to=[] , **a )
return Trainer(
a , a , train_dataset=a , eval_dataset=a , callbacks=a , )
def _UpperCAmelCase ( self , a , a ) -> Union[str, Any]:
self.assertEqual(len(a ) , len(a ) )
# Order doesn't matter
lowercase__ : Optional[int] = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
lowercase__ : Tuple = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
for cba, cba in zip(a , a ):
if isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(a , a )
elif isinstance(a , a ) and not isinstance(a , a ):
self.assertEqual(a , cba.__class__ )
elif not isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(cba.__class__ , a )
else:
self.assertEqual(a , a )
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
lowercase__ : Dict = ['on_init_end', 'on_train_begin']
lowercase__ : List[Any] = 0
lowercase__ : Optional[int] = len(trainer.get_eval_dataloader() )
lowercase__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(a ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : int = self.get_trainer()
lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# Callbacks passed at init are added to the default callbacks
lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a )
lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ : List[str] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Optional[Any] = self.get_trainer()
lowercase__ : List[Any] = trainer.pop_callback(a )
self.assertEqual(cb.__class__ , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# We can also add, pop, or remove by instance
lowercase__ : int = self.get_trainer()
lowercase__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Tuple = self.get_trainer()
lowercase__ : Dict = trainer.callback_handler.callbacks[0]
lowercase__ : Union[str, Any] = trainer.pop_callback(a )
self.assertEqual(a , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Tuple:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=a )
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# Independent log/save/eval
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
lowercase__ : str = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(a ) in warn_mock.call_args[0][0]
| 645
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class UpperCAmelCase_ :
# setable values
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Optional[jnp.ndarray] = None
lowerCamelCase__ : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def _UpperCAmelCase ( cls ) -> int:
return cls()
@dataclass
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : jnp.ndarray
lowerCamelCase__ : jnp.ndarray
lowerCamelCase__ : KarrasVeSchedulerState
class UpperCAmelCase_ ( _a , _a):
@property
def _UpperCAmelCase ( self ) -> Dict:
return True
@register_to_config
def __init__( self , a = 0.02 , a = 1_0_0 , a = 1.007 , a = 8_0 , a = 0.05 , a = 5_0 , ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> int:
return KarrasVeSchedulerState.create()
def _UpperCAmelCase ( self , a , a , a = () ) -> KarrasVeSchedulerState:
lowercase__ : Optional[Any] = jnp.arange(0 , a )[::-1].copy()
lowercase__ : str = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=a , schedule=jnp.array(a , dtype=jnp.floataa ) , timesteps=a , )
def _UpperCAmelCase ( self , a , a , a , a , ) -> Tuple[jnp.ndarray, float]:
if self.config.s_min <= sigma <= self.config.s_max:
lowercase__ : Optional[int] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowercase__ : int = 0
# sample eps ~ N(0, S_noise^2 * I)
lowercase__ : int = random.split(a , num=1 )
lowercase__ : List[Any] = self.config.s_noise * random.normal(key=a , shape=sample.shape )
lowercase__ : List[str] = sigma + gamma * sigma
lowercase__ : int = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _UpperCAmelCase ( self , a , a , a , a , a , a = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
lowercase__ : Union[str, Any] = sample_hat + sigma_hat * model_output
lowercase__ : int = (sample_hat - pred_original_sample) / sigma_hat
lowercase__ : Tuple = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=a , derivative=a , state=a )
def _UpperCAmelCase ( self , a , a , a , a , a , a , a , a = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
lowercase__ : Dict = sample_prev + sigma_prev * model_output
lowercase__ : List[str] = (sample_prev - pred_original_sample) / sigma_prev
lowercase__ : Tuple = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=a , derivative=a , state=a )
def _UpperCAmelCase ( self , a , a , a , a ) -> Optional[Any]:
raise NotImplementedError()
| 645
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCamelCase : str = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 645
| 1
|
"""simple docstring"""
from functools import lru_cache
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Optional[int] = 2
lowercase__ : List[Any] = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(_lowerCAmelCase )
if n > 1:
factors.add(_lowerCAmelCase )
return factors
@lru_cache
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
return len(unique_prime_factors(_lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : list ):
'''simple docstring'''
return len(set(_lowerCAmelCase ) ) in (0, 1)
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : List[Any] = 2
while True:
# Increment each value of a generated range
lowercase__ : List[Any] = [base + i for i in range(_lowerCAmelCase )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
lowercase__ : Tuple = [upf_len(_lowerCAmelCase ) for x in group]
checker.append(_lowerCAmelCase )
# If all numbers in the list are equal, return the group variable.
if equality(_lowerCAmelCase ):
return group
# Increment our base variable by 1
base += 1
def a_ ( _lowerCAmelCase : int = 4 ):
'''simple docstring'''
lowercase__ : Optional[Any] = run(_lowerCAmelCase )
return results[0] if len(_lowerCAmelCase ) else None
if __name__ == "__main__":
print(solution())
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self , a ) -> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
lowercase__ : str = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = 'sshleifer/tiny-gpt2'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : List[Any] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : Tuple = TensorFlowBenchmark(a , [config] )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : List[str] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : List[str] = TensorFlowBenchmark(a , [config] )
lowercase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[int] = AutoConfig.from_pretrained(a )
lowercase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a , [config] )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random'
lowercase__ : Any = AutoConfig.from_pretrained(a )
lowercase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : int = TensorFlowBenchmark(a , configs=[config] )
lowercase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , )
lowercase__ : Any = TensorFlowBenchmark(a )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , )
lowercase__ : Union[str, Any] = TensorFlowBenchmark(a )
benchmark.run()
self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(a ):
self.assertTrue(hasattr(a , 'sequential' ) )
self.assertTrue(hasattr(a , 'cumulative' ) )
self.assertTrue(hasattr(a , 'current' ) )
self.assertTrue(hasattr(a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , )
lowercase__ : Optional[int] = TensorFlowBenchmark(a )
lowercase__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
| 645
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCamelCase : Any = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Union[str, Any] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
_UpperCamelCase : int = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
_UpperCamelCase : Any = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
_UpperCamelCase : Optional[int] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : List[str] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : str = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = DistilBertModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(config=a )
lowercase__ : int = self._prepare_for_class(a , a )
lowercase__ : Tuple = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) )
lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ : Optional[Any] = model(a , attention_mask=a )[0]
lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a )
lowercase__ : List[Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a ) -> List[Any]:
lowercase__ : List[Any] = parent
def _UpperCAmelCase ( self ) -> str:
return {}
def a_ ( ):
'''simple docstring'''
lowercase__ : List[str] = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
lowercase__ : Optional[Any] = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
return [html_string_a, html_string_a]
@require_bsa
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Dict = MarkupLMFeatureExtractor if is_bsa_available() else None
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : Union[str, Any] = MarkupLMFeatureExtractionTester(self )
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return self.feature_extract_tester.prepare_feat_extract_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
# Initialize feature_extractor
lowercase__ : List[Any] = self.feature_extraction_class()
# Test not batched input
lowercase__ : Tuple = get_html_strings()[0]
lowercase__ : Any = feature_extractor(a )
# fmt: off
lowercase__ : Dict = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
lowercase__ : str = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes , a )
self.assertEqual(encoding.xpaths , a )
# Test batched
lowercase__ : List[Any] = get_html_strings()
lowercase__ : List[Any] = feature_extractor(a )
# fmt: off
lowercase__ : Union[str, Any] = expected_nodes + [['My First Heading', 'My first paragraph.']]
lowercase__ : Dict = expected_xpaths + [['/html/body/h1', '/html/body/p']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , a )
self.assertEqual(encoding.xpaths , a )
| 645
|
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_UpperCamelCase : int = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : int = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
_UpperCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 645
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any:
lowercase__ : List[str] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[int] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : str = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Any = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Tuple = n_targets
lowercase__ : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ : int = []
for i in range(self.batch_size ):
lowercase__ : Optional[Any] = {}
lowercase__ : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=a )
lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a )
labels.append(a )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _UpperCAmelCase ( self , a , a , a ) -> int:
lowercase__ : List[str] = YolosModel(config=a )
model.to(a )
model.eval()
lowercase__ : List[Any] = model(a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = YolosForObjectDetection(a )
model.to(a )
model.eval()
lowercase__ : Dict = model(pixel_values=a )
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ : str = model(pixel_values=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__ : List[str] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Union[str, Any] = False
def _UpperCAmelCase ( self , a , a , a=False ) -> Dict:
lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase__ : Optional[Any] = []
for i in range(self.model_tester.batch_size ):
lowercase__ : Dict = {}
lowercase__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=a , dtype=torch.long )
lowercase__ : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=a , dtype=torch.float )
labels.append(a )
lowercase__ : Union[str, Any] = labels
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = YolosModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
lowercase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = True
# in YOLOS, the seq_len is different
lowercase__ : Tuple = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : str = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : Optional[int] = True
lowercase__ : List[Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[str] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : Dict = len(a )
# Check attention is always last and order is fine
lowercase__ : Any = True
lowercase__ : int = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowercase__ : Tuple = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(a , a , a ):
lowercase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(a , a ) )
lowercase__ : int = outputs.hidden_states
lowercase__ : Any = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(a ) , a )
# YOLOS has a different seq_length
lowercase__ : Optional[int] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[Any] = True
check_hidden_states_output(a , a , a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*a )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = YolosModel.from_pretrained(a )
self.assertIsNotNone(a )
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a )
lowercase__ : Tuple = self.default_image_processor
lowercase__ : Optional[int] = prepare_img()
lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : int = model(inputs.pixel_values )
# verify outputs
lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , )
lowercase__ : List[str] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) )
# verify postprocessing
lowercase__ : Optional[Any] = image_processor.post_process_object_detection(
a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a )
lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7]
lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , a )
self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
| 645
| 1
|
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 645
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_UpperCamelCase : int = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
def __init__( self , a=False , a=False , a=6.0 , a=None , a=False , a=False , a=None , a="fp4" , a=False , **a , ) -> Tuple:
lowercase__ : str = load_in_abit
lowercase__ : str = load_in_abit
lowercase__ : List[str] = llm_inta_threshold
lowercase__ : Dict = llm_inta_skip_modules
lowercase__ : Tuple = llm_inta_enable_fpaa_cpu_offload
lowercase__ : Any = llm_inta_has_fpaa_weight
lowercase__ : Any = bnb_abit_quant_type
lowercase__ : Dict = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
lowercase__ : Dict = torch.floataa
elif isinstance(a , a ):
lowercase__ : Any = getattr(a , a )
elif isinstance(a , torch.dtype ):
lowercase__ : Any = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def _UpperCAmelCase ( self ) -> str:
if not isinstance(self.llm_inta_threshold , a ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , a ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , a ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , a ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , a ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , a ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def _UpperCAmelCase ( self ) -> Tuple:
return self.load_in_abit or self.load_in_abit
def _UpperCAmelCase ( self ) -> List[str]:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _UpperCAmelCase ( cls , a , a , **a ) -> Optional[Any]:
lowercase__ : List[Any] = cls(**a )
lowercase__ : Union[str, Any] = []
for key, value in kwargs.items():
if hasattr(a , a ):
setattr(a , a , a )
to_remove.append(a )
for key in to_remove:
kwargs.pop(a , a )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _UpperCAmelCase ( self , a ) -> Dict:
with open(a , 'w' , encoding='utf-8' ) as writer:
lowercase__ : Any = self.to_dict()
lowercase__ : str = json.dumps(a , indent=2 , sort_keys=a ) + '\n'
writer.write(a )
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self ) -> Dict:
return f"""{self.__class__.__name__} {self.to_json_string()}"""
def _UpperCAmelCase ( self , a = True ) -> str:
if use_diff is True:
lowercase__ : List[Any] = self.to_diff_dict()
else:
lowercase__ : List[str] = self.to_dict()
return json.dumps(a , indent=2 , sort_keys=a ) + "\n"
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Tuple = self.to_dict()
# get the default config dict
lowercase__ : Optional[Any] = BitsAndBytesConfig().to_dict()
lowercase__ : int = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
lowercase__ : Optional[int] = value
return serializable_config_dict
| 645
| 1
|
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_UpperCamelCase : List[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict ):
'''simple docstring'''
return max(metric_fn(_lowerCAmelCase , _lowerCAmelCase ) for gt in ground_truths )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Any = [line.strip() for line in open(_lowerCAmelCase , 'r' ).readlines()]
lowercase__ : Optional[int] = []
if args.gold_data_mode == "qa":
lowercase__ : Any = pd.read_csv(_lowerCAmelCase , sep='\t' , header=_lowerCAmelCase )
for answer_list in data[1]:
lowercase__ : str = ast.literal_eval(_lowerCAmelCase )
answers.append(_lowerCAmelCase )
else:
lowercase__ : List[Any] = [line.strip() for line in open(_lowerCAmelCase , 'r' ).readlines()]
lowercase__ : int = [[reference] for reference in references]
lowercase__ : Tuple = 0
for prediction, ground_truths in zip(_lowerCAmelCase , _lowerCAmelCase ):
total += 1
em += metric_max_over_ground_truths(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
fa += metric_max_over_ground_truths(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowercase__ : str = 1_0_0.0 * em / total
lowercase__ : str = 1_0_0.0 * fa / total
logger.info(f"""F1: {fa:.2f}""" )
logger.info(f"""EM: {em:.2f}""" )
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
lowercase__ : Optional[Any] = args.k
lowercase__ : Optional[int] = [line.strip() for line in open(_lowerCAmelCase , 'r' ).readlines()]
lowercase__ : List[str] = [line.strip() for line in open(_lowerCAmelCase , 'r' ).readlines()]
lowercase__ : List[Any] = 0
for hypo, reference in zip(_lowerCAmelCase , _lowerCAmelCase ):
lowercase__ : Union[str, Any] = set(hypo.split('\t' )[:k] )
lowercase__ : Tuple = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
lowercase__ : int = 1_0_0.0 * em / total
logger.info(f"""Precision@{k}: {em: .2f}""" )
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
def strip_title(_lowerCAmelCase : List[Any] ):
if title.startswith('"' ):
lowercase__ : int = title[1:]
if title.endswith('"' ):
lowercase__ : Optional[int] = title[:-1]
return title
lowercase__ : List[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_lowerCAmelCase , return_tensors='pt' , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , )['input_ids'].to(args.device )
lowercase__ : Optional[Any] = rag_model.rag.question_encoder(_lowerCAmelCase )
lowercase__ : Optional[Any] = question_enc_outputs[0]
lowercase__ : str = rag_model.retriever(
_lowerCAmelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
lowercase__ : List[str] = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
lowercase__ : str = []
for docs in all_docs:
lowercase__ : Tuple = [strip_title(_lowerCAmelCase ) for title in docs['title']]
provenance_strings.append('\t'.join(_lowerCAmelCase ) )
return provenance_strings
def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
with torch.no_grad():
lowercase__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_lowerCAmelCase , return_tensors='pt' , padding=_lowerCAmelCase , truncation=_lowerCAmelCase )
lowercase__ : int = inputs_dict.input_ids.to(args.device )
lowercase__ : int = inputs_dict.attention_mask.to(args.device )
lowercase__ : Any = rag_model.generate( # rag_model overwrites generate
_lowerCAmelCase , attention_mask=_lowerCAmelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_lowerCAmelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
lowercase__ : List[str] = rag_model.retriever.generator_tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
if args.print_predictions:
for q, a in zip(_lowerCAmelCase , _lowerCAmelCase ):
logger.info('Q: {} - A: {}'.format(_lowerCAmelCase , _lowerCAmelCase ) )
return answers
def a_ ( ):
'''simple docstring'''
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=_lowerCAmelCase , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=_lowerCAmelCase , choices=['exact', 'compressed', 'legacy'] , type=_lowerCAmelCase , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=_lowerCAmelCase , type=_lowerCAmelCase , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=_lowerCAmelCase , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=_lowerCAmelCase , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=_lowerCAmelCase , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=_lowerCAmelCase , choices=['qa', 'ans'] , help=(
'Format of the gold data file'
'qa - a single line in the following format: question [tab] answer_list'
'ans - a single line of the gold file contains the expected answer string'
) , )
parser.add_argument(
'--predictions_path' , type=_lowerCAmelCase , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
'--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=_lowerCAmelCase , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=_lowerCAmelCase , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=_lowerCAmelCase , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=50 , type=_lowerCAmelCase , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
'--print_docs' , action='store_true' , help='If True, prints docs retried while generating.' , )
lowercase__ : Optional[int] = parser.parse_args()
lowercase__ : Tuple = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def a_ ( _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : int = {}
if args.model_type is None:
lowercase__ : Union[str, Any] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
lowercase__ : Tuple = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
lowercase__ : Tuple = args.n_docs
if args.index_name is not None:
lowercase__ : Any = args.index_name
if args.index_path is not None:
lowercase__ : Tuple = args.index_path
else:
lowercase__ : Tuple = BartForConditionalGeneration
lowercase__ : Optional[Any] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , _lowerCAmelCase )
lowercase__ : Dict = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
lowercase__ : Union[str, Any] = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(_lowerCAmelCase , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(_lowerCAmelCase ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
lowercase__ : Tuple = RagRetriever.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
lowercase__ : List[str] = model_class.from_pretrained(_lowerCAmelCase , retriever=_lowerCAmelCase , **_lowerCAmelCase )
model.retriever.init_retrieval()
else:
lowercase__ : Optional[int] = model_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
lowercase__ : Dict = []
for line in tqdm(_lowerCAmelCase ):
questions.append(line.strip() )
if len(_lowerCAmelCase ) == args.eval_batch_size:
lowercase__ : List[str] = evaluate_batch_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
preds_file.write('\n'.join(_lowerCAmelCase ) + '\n' )
preds_file.flush()
lowercase__ : Union[str, Any] = []
if len(_lowerCAmelCase ) > 0:
lowercase__ : Union[str, Any] = evaluate_batch_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
preds_file.write('\n'.join(_lowerCAmelCase ) )
preds_file.flush()
score_fn(_lowerCAmelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
_UpperCamelCase : Any = get_args()
main(args)
| 645
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : int = 16
_UpperCamelCase : Union[str, Any] = 32
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
return int(x / 2**20 )
class UpperCAmelCase_ :
def __enter__( self ) -> Union[str, Any]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase__ : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self , *a ) -> Any:
gc.collect()
torch.cuda.empty_cache()
lowercase__ : Optional[Any] = torch.cuda.memory_allocated()
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
lowercase__ : List[Any] = bamb(self.end - self.begin )
lowercase__ : List[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ):
'''simple docstring'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
lowercase__ : Union[str, Any] = load_dataset(
'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} )
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Union[str, Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCAmelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : Optional[int] = config['lr']
lowercase__ : Optional[Any] = int(config['num_epochs'] )
lowercase__ : Optional[Any] = int(config['seed'] )
lowercase__ : int = int(config['batch_size'] )
lowercase__ : Union[str, Any] = args.model_name_or_path
set_seed(_lowerCAmelCase )
lowercase__ , lowercase__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase )
# Instantiate optimizer
lowercase__ : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowercase__ : List[Any] = 1
lowercase__ : List[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , )
else:
lowercase__ : Tuple = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase__ : Optional[int] = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ : Tuple = 0
# Now we train the model
lowercase__ : Optional[Any] = {}
for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
lowercase__ : List[Any] = model(**_lowerCAmelCase )
lowercase__ : Dict = outputs.loss
lowercase__ : int = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowercase__ : int = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , )
parser.add_argument(
'--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=_lowerCAmelCase , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=_lowerCAmelCase , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=_lowerCAmelCase , default=1 , help='Number of train epochs.' , )
lowercase__ : Any = parser.parse_args()
lowercase__ : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 645
| 1
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
lowercase__ : str = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ : Optional[Any] = emb.weight.shape
lowercase__ : Optional[int] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
lowercase__ : Dict = emb.weight.data
return lin_layer
def a_ ( _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ : Optional[Any] = torch.load(_lowerCAmelCase , map_location='cpu' )
lowercase__ : Optional[int] = mam_aaa['args'] or mam_aaa['cfg']['model']
lowercase__ : Union[str, Any] = mam_aaa['model']
remove_ignore_keys_(_lowerCAmelCase )
lowercase__ : Tuple = state_dict['encoder.embed_tokens.weight'].shape[0]
lowercase__ : Optional[int] = MaMaaaConfig(
vocab_size=_lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
lowercase__ : List[Any] = state_dict['decoder.embed_tokens.weight']
lowercase__ : List[str] = MaMaaaForConditionalGeneration(_lowerCAmelCase )
model.model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
lowercase__ : Optional[int] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
_UpperCamelCase : List[Any] = parser.parse_args()
_UpperCamelCase : Tuple = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
| 645
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Any = [0] * len(_lowerCAmelCase )
for i in range(1 , len(_lowerCAmelCase ) ):
# use last results for better performance - dynamic programming
lowercase__ : List[str] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowercase__ : Dict = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowercase__ : Union[str, Any] = j
return prefix_result
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
return max(prefix_function(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
| 1
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=[0.5, 0.5, 0.5] , a=[0.5, 0.5, 0.5] , a=True , a=1 / 2_5_5 , a=True , ) -> Union[str, Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowercase__ : str = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
lowercase__ : str = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : str = num_channels
lowercase__ : Optional[Any] = min_resolution
lowercase__ : int = max_resolution
lowercase__ : Any = do_resize
lowercase__ : Dict = size
lowercase__ : Union[str, Any] = do_normalize
lowercase__ : Optional[Any] = image_mean
lowercase__ : List[Any] = image_std
lowercase__ : int = do_rescale
lowercase__ : str = rescale_factor
lowercase__ : List[Any] = do_pad
def _UpperCAmelCase ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _UpperCAmelCase ( self , a , a=False ) -> int:
if not batched:
lowercase__ : List[str] = image_inputs[0]
if isinstance(a , Image.Image ):
lowercase__ , lowercase__ : str = image.size
else:
lowercase__ , lowercase__ : List[Any] = image.shape[1], image.shape[2]
if w < h:
lowercase__ : str = int(self.size['shortest_edge'] * h / w )
lowercase__ : List[str] = self.size['shortest_edge']
elif w > h:
lowercase__ : str = self.size['shortest_edge']
lowercase__ : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
lowercase__ : int = self.size['shortest_edge']
lowercase__ : int = self.size['shortest_edge']
else:
lowercase__ : Union[str, Any] = []
for image in image_inputs:
lowercase__ , lowercase__ : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowercase__ : Union[str, Any] = max(a , key=lambda a : item[0] )[0]
lowercase__ : Optional[Any] = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Any = DeformableDetrImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = DeformableDetrImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> int:
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'image_mean' ) )
self.assertTrue(hasattr(a , 'image_std' ) )
self.assertTrue(hasattr(a , 'do_normalize' ) )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'do_rescale' ) )
self.assertTrue(hasattr(a , 'do_pad' ) )
self.assertTrue(hasattr(a , 'size' ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , a )
lowercase__ : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=a )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2, 'longest_edge': 8_4} )
self.assertEqual(image_processor.do_pad , a )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processing
lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ : List[str] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ , lowercase__ : Dict = self.image_processor_tester.get_expected_values(a , batched=a )
lowercase__ : Optional[int] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# Initialize image_processing
lowercase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ : Dict = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ : List[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCAmelCase ( self ) -> List[Any]:
# Initialize image_processing
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : List[str] = image_processing(a , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _UpperCAmelCase ( self ) -> Any:
# prepare image and target
lowercase__ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowercase__ : Dict = json.loads(f.read() )
lowercase__ : Tuple = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
lowercase__ : Optional[int] = DeformableDetrImageProcessor()
lowercase__ : Tuple = image_processing(images=a , annotations=a , return_tensors='pt' )
# verify pixel values
lowercase__ : int = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , a )
lowercase__ : Dict = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , a , atol=1e-4 ) )
# verify area
lowercase__ : Any = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , a ) )
# verify boxes
lowercase__ : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , a )
lowercase__ : Optional[int] = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , a , atol=1e-3 ) )
# verify image_id
lowercase__ : Any = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , a ) )
# verify is_crowd
lowercase__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , a ) )
# verify class_labels
lowercase__ : str = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , a ) )
# verify orig_size
lowercase__ : Tuple = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , a ) )
# verify size
lowercase__ : Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , a ) )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
# prepare image, target and masks_path
lowercase__ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowercase__ : Optional[int] = json.loads(f.read() )
lowercase__ : int = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
lowercase__ : Optional[Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowercase__ : int = DeformableDetrImageProcessor(format='coco_panoptic' )
lowercase__ : Tuple = image_processing(images=a , annotations=a , masks_path=a , return_tensors='pt' )
# verify pixel values
lowercase__ : int = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , a )
lowercase__ : Union[str, Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , a , atol=1e-4 ) )
# verify area
lowercase__ : Dict = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , a ) )
# verify boxes
lowercase__ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , a )
lowercase__ : Optional[Any] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , a , atol=1e-3 ) )
# verify image_id
lowercase__ : Dict = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , a ) )
# verify is_crowd
lowercase__ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , a ) )
# verify class_labels
lowercase__ : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , a ) )
# verify masks
lowercase__ : List[Any] = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , a )
# verify orig_size
lowercase__ : Optional[Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , a ) )
# verify size
lowercase__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , a ) )
| 645
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]:
lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0}
lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
lowercase__ : Optional[int] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : str = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = min_resolution
lowercase__ : int = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : str = do_center_crop
lowercase__ : List[Any] = crop_size
lowercase__ : Union[str, Any] = do_flip_channel_order
def _UpperCAmelCase ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Tuple = MobileViTImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_center_crop' ) )
self.assertTrue(hasattr(a , 'center_crop' ) )
self.assertTrue(hasattr(a , 'do_flip_channel_order' ) )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processing
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 645
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase_ ( _a):
def __init__( self ) -> Any:
lowercase__ : Tuple = []
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_init_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]:
self.events.append('on_train_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_train_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_epoch_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[Any]:
self.events.append('on_epoch_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_step_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> str:
self.events.append('on_step_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_evaluate' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple:
self.events.append('on_predict' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Union[str, Any]:
self.events.append('on_save' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_log' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_prediction_step' )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> str:
lowercase__ : str = tempfile.mkdtemp()
def _UpperCAmelCase ( self ) -> Dict:
shutil.rmtree(self.output_dir )
def _UpperCAmelCase ( self , a=0 , a=0 , a=6_4 , a=6_4 , a=None , a=False , **a ) -> int:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
lowercase__ : str = RegressionDataset(length=a )
lowercase__ : Any = RegressionDataset(length=a )
lowercase__ : Optional[Any] = RegressionModelConfig(a=a , b=a )
lowercase__ : Union[str, Any] = RegressionPreTrainedModel(a )
lowercase__ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=a , report_to=[] , **a )
return Trainer(
a , a , train_dataset=a , eval_dataset=a , callbacks=a , )
def _UpperCAmelCase ( self , a , a ) -> Union[str, Any]:
self.assertEqual(len(a ) , len(a ) )
# Order doesn't matter
lowercase__ : Optional[int] = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
lowercase__ : Tuple = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
for cba, cba in zip(a , a ):
if isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(a , a )
elif isinstance(a , a ) and not isinstance(a , a ):
self.assertEqual(a , cba.__class__ )
elif not isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(cba.__class__ , a )
else:
self.assertEqual(a , a )
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
lowercase__ : Dict = ['on_init_end', 'on_train_begin']
lowercase__ : List[Any] = 0
lowercase__ : Optional[int] = len(trainer.get_eval_dataloader() )
lowercase__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(a ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : int = self.get_trainer()
lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# Callbacks passed at init are added to the default callbacks
lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a )
lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ : List[str] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Optional[Any] = self.get_trainer()
lowercase__ : List[Any] = trainer.pop_callback(a )
self.assertEqual(cb.__class__ , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# We can also add, pop, or remove by instance
lowercase__ : int = self.get_trainer()
lowercase__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Tuple = self.get_trainer()
lowercase__ : Dict = trainer.callback_handler.callbacks[0]
lowercase__ : Union[str, Any] = trainer.pop_callback(a )
self.assertEqual(a , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Tuple:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=a )
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# Independent log/save/eval
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
lowercase__ : str = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(a ) in warn_mock.call_args[0][0]
| 645
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict:
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : int = is_training
lowercase__ : str = use_attention_mask
lowercase__ : Dict = use_token_type_ids
lowercase__ : Optional[int] = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : int = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Optional[int] = num_choices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_attention_mask:
lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[str] = None
if self.use_token_type_ids:
lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Any = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_class_name in self.all_model_classes:
lowercase__ : str = model_class_name.from_pretrained('albert-base-v2' )
lowercase__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' )
lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ : Any = model(a , attention_mask=a )[0]
lowercase__ : Tuple = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , a )
lowercase__ : Optional[Any] = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : int = 16
_UpperCamelCase : Union[str, Any] = 32
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
return int(x / 2**20 )
class UpperCAmelCase_ :
def __enter__( self ) -> Union[str, Any]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase__ : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self , *a ) -> Any:
gc.collect()
torch.cuda.empty_cache()
lowercase__ : Optional[Any] = torch.cuda.memory_allocated()
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
lowercase__ : List[Any] = bamb(self.end - self.begin )
lowercase__ : List[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ):
'''simple docstring'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
lowercase__ : Union[str, Any] = load_dataset(
'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} )
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Union[str, Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCAmelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : Optional[int] = config['lr']
lowercase__ : Optional[Any] = int(config['num_epochs'] )
lowercase__ : Optional[Any] = int(config['seed'] )
lowercase__ : int = int(config['batch_size'] )
lowercase__ : Union[str, Any] = args.model_name_or_path
set_seed(_lowerCAmelCase )
lowercase__ , lowercase__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase )
# Instantiate optimizer
lowercase__ : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowercase__ : List[Any] = 1
lowercase__ : List[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , )
else:
lowercase__ : Tuple = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase__ : Optional[int] = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ : Tuple = 0
# Now we train the model
lowercase__ : Optional[Any] = {}
for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
lowercase__ : List[Any] = model(**_lowerCAmelCase )
lowercase__ : Dict = outputs.loss
lowercase__ : int = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowercase__ : int = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , )
parser.add_argument(
'--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=_lowerCAmelCase , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=_lowerCAmelCase , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=_lowerCAmelCase , default=1 , help='Number of train epochs.' , )
lowercase__ : Any = parser.parse_args()
lowercase__ : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 645
|
"""simple docstring"""
from collections.abc import Sequence
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
lowercase__ : int = 0.0
for coeff in reversed(_lowerCAmelCase ):
lowercase__ : List[Any] = result * x + coeff
return result
if __name__ == "__main__":
_UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0)
_UpperCamelCase : Dict = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 645
| 1
|
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCAmelCase_ ( _a):
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Dict = SMALL_MODEL_IDENTIFIER
lowercase__ : Dict = 'pt'
lowercase__ : Union[str, Any] = 'tf'
def _UpperCAmelCase ( self , a ) -> int:
lowercase__ : Optional[Any] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(a )
def _UpperCAmelCase ( self , a ) -> int:
lowercase__ : Any = TFAutoModel.from_pretrained(self.test_model , from_pt=a )
model_tf.save_pretrained(a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = 'mock_framework'
# Framework provided - return whatever the user provides
lowercase__ : Tuple = FeaturesManager.determine_framework(self.test_model , a )
self.assertEqual(a , a )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(a )
lowercase__ : Dict = FeaturesManager.determine_framework(a , a )
self.assertEqual(a , a )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(a )
lowercase__ : Optional[Any] = FeaturesManager.determine_framework(a , a )
self.assertEqual(a , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(a )
lowercase__ : str = FeaturesManager.determine_framework(a )
self.assertEqual(a , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(a )
lowercase__ : List[Any] = FeaturesManager.determine_framework(a )
self.assertEqual(a , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(a ):
lowercase__ : List[str] = FeaturesManager.determine_framework(a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = MagicMock(return_value=a )
with patch('transformers.onnx.features.is_tf_available' , a ):
lowercase__ : Tuple = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(a , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowercase__ : Dict = MagicMock(return_value=a )
with patch('transformers.onnx.features.is_torch_available' , a ):
lowercase__ : List[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(a , self.framework_tf )
# Both in environment -> use PyTorch
lowercase__ : Union[str, Any] = MagicMock(return_value=a )
lowercase__ : Tuple = MagicMock(return_value=a )
with patch('transformers.onnx.features.is_tf_available' , a ), patch(
'transformers.onnx.features.is_torch_available' , a ):
lowercase__ : List[str] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(a , self.framework_pt )
# Both not in environment -> raise error
lowercase__ : Union[str, Any] = MagicMock(return_value=a )
lowercase__ : List[Any] = MagicMock(return_value=a )
with patch('transformers.onnx.features.is_tf_available' , a ), patch(
'transformers.onnx.features.is_torch_available' , a ):
with self.assertRaises(a ):
lowercase__ : Any = FeaturesManager.determine_framework(self.test_model )
| 645
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
_UpperCamelCase : Any = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def a_ ( _lowerCAmelCase : Optional[Any]=True ):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a))
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[Any] = None
def _UpperCAmelCase ( self , a , a ) -> List[Any]:
with TemporaryDirectory() as tmp_dir:
lowercase__ : List[str] = dataset_module_factory(a , cache_dir=a )
lowercase__ : List[Any] = import_main_class(dataset_module.module_path , dataset=a )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
lowercase__ : Union[str, Any] = cached_path(a , cache_dir=a )
self.assertTrue(os.path.exists(a ) )
@pytest.mark.integration
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
lowercase__ : int = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : Optional[int] = import_main_class(dataset_module.module_path )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
lowercase__ : Optional[int] = None
builder_instance.download_and_prepare()
lowercase__ : Optional[int] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert "train" in ds
assert isinstance(ds['train'] , _lowerCAmelCase )
assert next(iter(ds['train'] ) )
| 645
| 1
|
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_UpperCamelCase : Union[str, Any] = 16
_UpperCamelCase : Optional[Any] = 32
def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : DatasetDict , _lowerCAmelCase : List[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : int = 16 ):
'''simple docstring'''
lowercase__ : int = AutoTokenizer.from_pretrained('bert-base-cased' )
lowercase__ : str = DatasetDict(
{
'train': dataset['train'].select(_lowerCAmelCase ),
'validation': dataset['train'].select(_lowerCAmelCase ),
'test': dataset['validation'],
} )
def tokenize_function(_lowerCAmelCase : Dict ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : Union[str, Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Any = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCAmelCase : List[str] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : Dict = 8
else:
lowercase__ : Optional[int] = None
return tokenizer.pad(
_lowerCAmelCase , padding='longest' , max_length=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_tensors='pt' , )
# Instantiate dataloaders.
lowercase__ : List[str] = DataLoader(
tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : str = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : Dict = DataLoader(
tokenized_datasets['test'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader, test_dataloader
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : Optional[Any] = []
# Download the dataset
lowercase__ : Optional[Any] = load_dataset('glue' , 'mrpc' )
# Create our splits
lowercase__ : List[str] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
lowercase__ : List[str] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : int = config['lr']
lowercase__ : Optional[int] = int(config['num_epochs'] )
lowercase__ : Optional[Any] = int(config['seed'] )
lowercase__ : Optional[int] = int(config['batch_size'] )
lowercase__ : Union[str, Any] = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
lowercase__ : Tuple = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowercase__ : Dict = batch_size // MAX_GPU_BATCH_SIZE
lowercase__ : Dict = MAX_GPU_BATCH_SIZE
set_seed(_lowerCAmelCase )
# New Code #
# Create our folds:
lowercase__ : Optional[Any] = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] )
lowercase__ : Optional[Any] = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(_lowerCAmelCase ):
lowercase__ , lowercase__ , lowercase__ : Optional[int] = get_fold_dataloaders(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : Any = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : Any = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : Tuple = AdamW(params=model.parameters() , lr=_lowerCAmelCase )
# Instantiate scheduler
lowercase__ : Dict = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Now we train the model
for epoch in range(_lowerCAmelCase ):
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : List[str] = model(**_lowerCAmelCase )
lowercase__ : Dict = outputs.loss
lowercase__ : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : str = model(**_lowerCAmelCase )
lowercase__ : str = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : str = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=_lowerCAmelCase , references=_lowerCAmelCase , )
lowercase__ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , _lowerCAmelCase )
# New Code #
# We also run predictions on the test set at the very end
lowercase__ : Optional[int] = []
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : Any = model(**_lowerCAmelCase )
lowercase__ : Optional[Any] = outputs.logits
lowercase__ , lowercase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(_lowerCAmelCase , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
lowercase__ : Tuple = torch.cat(_lowerCAmelCase , dim=0 )
lowercase__ : Optional[int] = torch.stack(_lowerCAmelCase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
lowercase__ : Optional[int] = metric.compute(predictions=_lowerCAmelCase , references=_lowerCAmelCase )
accelerator.print('Average test metrics from all folds:' , _lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : Tuple = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=_lowerCAmelCase , default=_lowerCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds' , type=_lowerCAmelCase , default=3 , help='The number of splits to perform across the dataset' )
lowercase__ : List[Any] = parser.parse_args()
lowercase__ : Optional[int] = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 645
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a_ ( _lowerCAmelCase : dict ):
'''simple docstring'''
return (data["data"], data["target"])
def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ):
'''simple docstring'''
lowercase__ : Any = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(_lowerCAmelCase , _lowerCAmelCase )
# Predict target for test data
lowercase__ : str = xgb.predict(_lowerCAmelCase )
lowercase__ : Union[str, Any] = predictions.reshape(len(_lowerCAmelCase ) , 1 )
return predictions
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[Any] = fetch_california_housing()
lowercase__ , lowercase__ : str = data_handling(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = train_test_split(
_lowerCAmelCase , _lowerCAmelCase , test_size=0.2_5 , random_state=1 )
lowercase__ : Any = xgboost(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
print(f"""Mean Square Error : {mean_squared_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 645
| 1
|
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> str:
lowercase__ : Optional[int] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : str = seq_length
lowercase__ : List[str] = is_training
lowercase__ : List[str] = use_input_mask
lowercase__ : Any = use_token_type_ids
lowercase__ : Tuple = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : Tuple = hidden_size
lowercase__ : Optional[int] = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : Dict = hidden_act
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : Optional[int] = type_vocab_size
lowercase__ : Dict = type_sequence_label_size
lowercase__ : Optional[int] = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : List[Any] = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Any = None
if self.use_input_mask:
lowercase__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Optional[int] = None
if self.use_token_type_ids:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Any = None
lowercase__ : List[Any] = None
lowercase__ : Optional[int] = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> Optional[Any]:
lowercase__ : Optional[Any] = BioGptModel(config=a )
model.to(a )
model.eval()
lowercase__ : List[Any] = model(a , attention_mask=a )
lowercase__ : Optional[int] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a , a , a , a , ) -> Tuple:
lowercase__ : Union[str, Any] = BioGptForCausalLM(config=a )
model.to(a )
model.eval()
lowercase__ : Dict = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , *a ) -> str:
lowercase__ : Tuple = BioGptModel(config=a )
model.to(a )
model.eval()
# create attention mask
lowercase__ : int = torch.ones(input_ids.shape , dtype=torch.long , device=a )
lowercase__ : List[Any] = self.seq_length // 2
lowercase__ : str = 0
# first forward pass
lowercase__ , lowercase__ : List[str] = model(a , attention_mask=a ).to_tuple()
# create hypothetical next token and extent to next_input_ids
lowercase__ : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
lowercase__ : Optional[int] = ids_tensor((1,) , a ).item() + 1
lowercase__ : List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
lowercase__ : Optional[int] = random_other_next_tokens
# append to next input_ids and attn_mask
lowercase__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase__ : Union[str, Any] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=a )] , dim=1 , )
# get two different outputs
lowercase__ : str = model(a , attention_mask=a )['last_hidden_state']
lowercase__ : List[Any] = model(a , past_key_values=a , attention_mask=a )['last_hidden_state']
# select random slice
lowercase__ : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase__ : Optional[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
lowercase__ : Dict = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1e-3 ) )
def _UpperCAmelCase ( self , a , a , a , a , a , *a ) -> Tuple:
lowercase__ : Tuple = BioGptModel(config=a ).to(a ).eval()
lowercase__ : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=a )
# first forward pass
lowercase__ : int = model(a , attention_mask=a , use_cache=a )
lowercase__ , lowercase__ : Any = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowercase__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase__ : Dict = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
lowercase__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase__ : str = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowercase__ : List[Any] = model(a , attention_mask=a )['last_hidden_state']
lowercase__ : str = model(a , attention_mask=a , past_key_values=a )[
'last_hidden_state'
]
# select random slice
lowercase__ : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1e-3 ) )
def _UpperCAmelCase ( self , a , a , a , a , a , *a , a=False ) -> Union[str, Any]:
lowercase__ : int = BioGptForCausalLM(a )
model.to(a )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
lowercase__ : List[Any] = model(a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _UpperCAmelCase ( self , a , *a ) -> str:
lowercase__ : str = BioGptModel(a )
lowercase__ : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _UpperCAmelCase ( self , a , a , a , a , a , *a ) -> int:
lowercase__ : Optional[int] = self.num_labels
lowercase__ : List[str] = BioGptForTokenClassification(a )
model.to(a )
model.eval()
lowercase__ : str = model(a , attention_mask=a , token_type_ids=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[Any] = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) : Dict = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowerCamelCase__ : str = (BioGptForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : List[Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[Any] = False
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[Any] = BioGptModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__ : Optional[Any] = type
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*a , gradient_checkpointing=a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*a )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Tuple = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(a )
lowercase__ : List[Any] = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
lowercase__ : int = 'left'
# Define PAD Token = EOS Token = 50256
lowercase__ : Optional[Any] = tokenizer.eos_token
lowercase__ : Dict = model.config.eos_token_id
# use different length sentences to test batching
lowercase__ : List[Any] = [
'Hello, my dog is a little',
'Today, I',
]
lowercase__ : Optional[int] = tokenizer(a , return_tensors='pt' , padding=a )
lowercase__ : Optional[Any] = inputs['input_ids'].to(a )
lowercase__ : Optional[int] = model.generate(
input_ids=a , attention_mask=inputs['attention_mask'].to(a ) , )
lowercase__ : Optional[Any] = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(a )
lowercase__ : Any = model.generate(input_ids=a )
lowercase__ : str = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
lowercase__ : str = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(a )
lowercase__ : int = model.generate(input_ids=a , max_length=model.config.max_length - num_paddings )
lowercase__ : Dict = tokenizer.batch_decode(a , skip_special_tokens=a )
lowercase__ : List[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=a )
lowercase__ : int = tokenizer.decode(output_padded[0] , skip_special_tokens=a )
lowercase__ : Union[str, Any] = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(a , a )
self.assertListEqual(a , [non_padded_sentence, padded_sentence] )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = BioGptModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str] = 3
lowercase__ : str = input_dict['input_ids']
lowercase__ : List[str] = input_ids.ne(1 ).to(a )
lowercase__ : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase__ : List[Any] = BioGptForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = 3
lowercase__ : Any = 'multi_label_classification'
lowercase__ : Optional[Any] = input_dict['input_ids']
lowercase__ : str = input_ids.ne(1 ).to(a )
lowercase__ : List[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase__ : Optional[int] = BioGptForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
lowercase__ : Union[str, Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
lowercase__ : List[str] = model(a )[0]
lowercase__ : int = 4_2_3_8_4
lowercase__ : int = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , a )
lowercase__ : Any = torch.tensor(
[[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Optional[int] = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
lowercase__ : Optional[Any] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(a )
torch.manual_seed(0 )
lowercase__ : int = tokenizer('COVID-19 is' , return_tensors='pt' ).to(a )
lowercase__ : Optional[int] = model.generate(
**a , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=a , )
lowercase__ : Optional[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=a )
lowercase__ : str = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(a , a )
| 645
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]:
lowercase__ : str = parent
lowercase__ : int = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Dict = patch_size
lowercase__ : Tuple = tubelet_size
lowercase__ : Optional[int] = num_frames
lowercase__ : Optional[int] = is_training
lowercase__ : int = use_labels
lowercase__ : Optional[int] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = mask_ratio
lowercase__ : Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase__ : Optional[Any] = (image_size // patch_size) ** 2
lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase__ : str = int(mask_ratio * self.seq_length )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Dict = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Tuple:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : Dict = VideoMAEModel(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = VideoMAEForPreTraining(a )
model.to(a )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Any = torch.ones((self.num_masks,) )
lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool()
lowercase__ : str = model(a , a )
# model only returns predictions for masked patches
lowercase__ : str = mask.sum().item()
lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCamelCase__ : Optional[int] = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : str = False
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = VideoMAEModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]:
lowercase__ : Union[str, Any] = copy.deepcopy(a )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) )
lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowercase__ : Union[str, Any] = bool_masked_pos.to(a )
if return_labels:
if model_class in [
*get_values(a ),
]:
lowercase__ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Dict:
pass
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
if not self.has_attentions:
pass
else:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = True
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase__ : Optional[Any] = True
lowercase__ : int = False
lowercase__ : Any = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Dict = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : List[str] = len(a )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : List[str] = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
lowercase__ : int = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> Optional[int]:
def check_hidden_states_output(a , a , a ):
lowercase__ : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a ) , a )
lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Tuple = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(a , a , a )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
def a_ ( ):
'''simple docstring'''
lowercase__ : int = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
lowercase__ : str = np.load(_lowerCAmelCase )
return list(_lowerCAmelCase )
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
a )
lowercase__ : str = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**a )
# verify the logits
lowercase__ : str = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a )
# add boolean mask, indicating which patches to mask
lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
lowercase__ : str = torch.load(a )
# forward pass
with torch.no_grad():
lowercase__ : List[Any] = model(**a )
# verify the logits
lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowercase__ : List[str] = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a )
self.assertEqual(outputs.logits.shape , a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to(
a )
with torch.no_grad():
lowercase__ : Any = model(**a )
lowercase__ : List[Any] = torch.tensor(torch.tensor([0.6_469] ) , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
_UpperCamelCase : str = logging.get_logger(__name__)
_UpperCamelCase : Union[str, Any] = {"vocab_file": "vocab.txt"}
_UpperCamelCase : List[str] = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
_UpperCamelCase : str = {
"YituTech/conv-bert-base": 5_12,
"YituTech/conv-bert-medium-small": 5_12,
"YituTech/conv-bert-small": 5_12,
}
_UpperCamelCase : int = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : List[Any] = VOCAB_FILES_NAMES
lowerCamelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : List[Any] = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Any = ConvBertTokenizer
def __init__( self , a=None , a=None , a=True , a="[UNK]" , a="[SEP]" , a="[PAD]" , a="[CLS]" , a="[MASK]" , a=True , a=None , **a , ) -> Any:
super().__init__(
a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
lowercase__ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , a ) != do_lower_case
or normalizer_state.get('strip_accents' , a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , a ) != tokenize_chinese_chars
):
lowercase__ : Dict = getattr(a , normalizer_state.pop('type' ) )
lowercase__ : Any = do_lower_case
lowercase__ : Optional[int] = strip_accents
lowercase__ : Tuple = tokenize_chinese_chars
lowercase__ : Dict = normalizer_class(**a )
lowercase__ : Any = do_lower_case
def _UpperCAmelCase ( self , a , a=None ) -> List[str]:
lowercase__ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCAmelCase ( self , a , a = None ) -> List[int]:
lowercase__ : Dict = [self.sep_token_id]
lowercase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self , a , a = None ) -> Tuple[str]:
lowercase__ : List[Any] = self._tokenizer.model.save(a , name=a )
return tuple(a )
| 645
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCamelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
for attribute in key.split('.' ):
lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
lowercase__ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowercase__ : Dict = value
elif weight_type == "weight_v":
lowercase__ : List[str] = value
elif weight_type == "bias":
lowercase__ : Optional[Any] = value
else:
lowercase__ : List[str] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = []
lowercase__ : List[str] = fairseq_model.state_dict()
lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ : int = True
if "*" in mapped_key:
lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' )[-2]
lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
lowercase__ : List[Any] = 'weight_g'
elif "weight_v" in name:
lowercase__ : int = 'weight_v'
elif "bias" in name:
lowercase__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = 'weight'
else:
lowercase__ : int = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = full_name.split('conv_layers.' )[-1]
lowercase__ : int = name.split('.' )
lowercase__ : int = int(items[0] )
lowercase__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase )
else:
lowercase__ : Any = UniSpeechSatConfig()
lowercase__ : Union[str, Any] = ''
if is_finetuned:
lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase )
else:
lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCamelCase : str = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 645
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_UpperCamelCase : Dict = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[Any] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_UpperCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 645
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int:
lowercase__ : int = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Dict = image_size
lowercase__ : str = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = embed_dim
lowercase__ : Any = depths
lowercase__ : Dict = num_heads
lowercase__ : List[str] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Tuple = qkv_bias
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = drop_path_rate
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[Any] = use_absolute_embeddings
lowercase__ : Optional[Any] = patch_norm
lowercase__ : Any = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : List[str] = is_training
lowercase__ : int = scope
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : List[str] = encoder_stride
lowercase__ : Optional[Any] = out_features
lowercase__ : Dict = out_indices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Tuple = MaskFormerSwinModel(config=a )
model.to(a )
model.eval()
lowercase__ : str = model(a )
lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(a ):
lowercase__ : Dict = ['stem']
lowercase__ : List[str] = MaskFormerSwinBackbone(config=a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase__ : str = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = MaskFormerSwinModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> str:
return
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a )
@unittest.skip('Swin does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
lowercase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple:
lowercase__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[Any] = outputs.hidden_states
lowercase__ : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swin has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = 3
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : int = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(a ):
lowercase__ : Union[str, Any] = 0
return t
def check_equivalence(a , a , a , a={} ):
with torch.no_grad():
lowercase__ : Optional[Any] = model(**a , return_dict=a , **a )
lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple()
def recursive_check(a , a ):
if isinstance(a , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a , a ):
recursive_check(a , a )
elif isinstance(a , a ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(a , a )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has"""
f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}."""
) , )
recursive_check(a , a )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
model.to(a )
model.eval()
lowercase__ : Tuple = self._prepare_for_class(a , a )
lowercase__ : Optional[Any] = self._prepare_for_class(a , a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a )
lowercase__ : int = self._prepare_for_class(a , a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _a):
lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[int] = MaskFormerSwinModelTester(self )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
lowercase__ : Optional[Any] = backbone_class(a )
backbone.to(a )
backbone.eval()
lowercase__ : Union[str, Any] = backbone(**a )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , a )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowercase__ : List[str] = backbone(**a , output_hidden_states=a )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowercase__ , lowercase__ , lowercase__ : int = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowercase__ : List[Any] = backbone(**a , output_attentions=a )
self.assertIsNotNone(outputs.attentions )
| 645
| 1
|
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=None , a=None , a=0 ) -> Any:
lowercase__ : Optional[Any] = 1.0 if scale is None else scale
lowercase__ : Optional[Any] = 0.0 if loc is None else loc
super().__init__(a , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=a )] )
@property
def _UpperCAmelCase ( self ) -> List[Any]:
return self.base_dist.mean * self.scale + self.loc
@property
def _UpperCAmelCase ( self ) -> List[str]:
return self.base_dist.variance * self.scale**2
@property
def _UpperCAmelCase ( self ) -> Any:
return self.variance.sqrt()
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a , a , **a ) -> None:
super().__init__(**a )
lowercase__ : Tuple = args_dim
lowercase__ : Optional[int] = nn.ModuleList([nn.Linear(a , a ) for dim in args_dim.values()] )
lowercase__ : str = domain_map
def _UpperCAmelCase ( self , a ) -> Tuple[torch.Tensor]:
lowercase__ : Union[str, Any] = [proj(a ) for proj in self.proj]
return self.domain_map(*a )
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a ) -> int:
super().__init__()
lowercase__ : Any = function
def _UpperCAmelCase ( self , a , *a ) -> List[str]:
return self.function(a , *a )
class UpperCAmelCase_ :
lowerCamelCase__ : type
lowerCamelCase__ : int
lowerCamelCase__ : Dict[str, int]
def __init__( self , a = 1 ) -> None:
lowercase__ : Optional[int] = dim
lowercase__ : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim}
def _UpperCAmelCase ( self , a ) -> List[str]:
if self.dim == 1:
return self.distribution_class(*a )
else:
return Independent(self.distribution_class(*a ) , 1 )
def _UpperCAmelCase ( self , a , a = None , a = None , ) -> Distribution:
lowercase__ : Optional[int] = self._base_distribution(a )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(a , loc=a , scale=a , event_dim=self.event_dim )
@property
def _UpperCAmelCase ( self ) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def _UpperCAmelCase ( self ) -> int:
return len(self.event_shape )
@property
def _UpperCAmelCase ( self ) -> float:
return 0.0
def _UpperCAmelCase ( self , a ) -> nn.Module:
return ParameterProjection(
in_features=a , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def _UpperCAmelCase ( self , *a ) -> Any:
raise NotImplementedError()
@staticmethod
def _UpperCAmelCase ( a ) -> torch.Tensor:
return (x + torch.sqrt(torch.square(a ) + 4.0 )) / 2.0
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
lowerCamelCase__ : type = StudentT
@classmethod
def _UpperCAmelCase ( cls , a , a , a ) -> str:
lowercase__ : str = cls.squareplus(a ).clamp_min(torch.finfo(scale.dtype ).eps )
lowercase__ : Dict = 2.0 + cls.squareplus(a )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Dict[str, int] = {"loc": 1, "scale": 1}
lowerCamelCase__ : type = Normal
@classmethod
def _UpperCAmelCase ( cls , a , a ) -> Tuple:
lowercase__ : Union[str, Any] = cls.squareplus(a ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Dict[str, int] = {"total_count": 1, "logits": 1}
lowerCamelCase__ : type = NegativeBinomial
@classmethod
def _UpperCAmelCase ( cls , a , a ) -> str:
lowercase__ : List[str] = cls.squareplus(a )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def _UpperCAmelCase ( self , a ) -> Distribution:
lowercase__ , lowercase__ : Any = distr_args
if self.dim == 1:
return self.distribution_class(total_count=a , logits=a )
else:
return Independent(self.distribution_class(total_count=a , logits=a ) , 1 )
def _UpperCAmelCase ( self , a , a = None , a = None ) -> Distribution:
lowercase__ , lowercase__ : int = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 645
|
"""simple docstring"""
import math
def a_ ( _lowerCAmelCase : int = 100 ):
'''simple docstring'''
lowercase__ : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) )
lowercase__ : str = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
| 645
| 1
|
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
_UpperCamelCase : Tuple = logging.get_logger(__name__)
_UpperCamelCase : Any = "T5Config"
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Optional[Any] = "mt5"
lowerCamelCase__ : Tuple = MTaConfig
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : List[str] = "mt5"
lowerCamelCase__ : str = MTaConfig
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : int = "mt5"
lowerCamelCase__ : List[Any] = MTaConfig
| 645
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : List[Any] = controlnet_params
lowercase__ : int = 'bird'
lowercase__ : List[Any] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
lowercase__ : List[Any] = jax.random.PRNGKey(0 )
lowercase__ : Tuple = jax.random.split(a , jax.device_count() )
lowercase__ : str = replicate(a )
lowercase__ : List[str] = shard(a )
lowercase__ : Dict = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : Optional[Any] = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : Optional[Any] = controlnet_params
lowercase__ : List[Any] = 'Chef in the kitchen'
lowercase__ : List[str] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
lowercase__ : List[str] = jax.random.PRNGKey(0 )
lowercase__ : str = jax.random.split(a , jax.device_count() )
lowercase__ : Optional[Any] = replicate(a )
lowercase__ : Optional[Any] = shard(a )
lowercase__ : List[Any] = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : str = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 645
| 1
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
lowercase__ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(a )
from datasets import load_dataset
lowercase__ : str = load_dataset('nielsr/rvlcdip-demo' )
lowercase__ : Tuple = dataset['train'][0]['image'].convert('RGB' )
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : List[str] = model(**a )
lowercase__ : List[Any] = outputs.logits
lowercase__ : Union[str, Any] = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , a )
lowercase__ : Tuple = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4 ) )
| 645
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 645
| 1
|
"""simple docstring"""
import functools
from typing import Any
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : list[str] ):
'''simple docstring'''
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or len(_lowerCAmelCase ) == 0:
raise ValueError('the string should be not empty string' )
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or not all(
isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) > 0 for item in words ):
raise ValueError('the words should be a list of non-empty strings' )
# Build trie
lowercase__ : dict[str, Any] = {}
lowercase__ : str = 'WORD_KEEPER'
for word in words:
lowercase__ : int = trie
for c in word:
if c not in trie_node:
lowercase__ : Optional[Any] = {}
lowercase__ : Tuple = trie_node[c]
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = len(_lowerCAmelCase )
# Dynamic programming method
@functools.cache
def is_breakable(_lowerCAmelCase : int ) -> bool:
if index == len_string:
return True
lowercase__ : Union[str, Any] = trie
for i in range(_lowerCAmelCase , _lowerCAmelCase ):
lowercase__ : Any = trie_node.get(string[i] , _lowerCAmelCase )
if trie_node is None:
return False
if trie_node.get(_lowerCAmelCase , _lowerCAmelCase ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
lowercase__ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(a )
from datasets import load_dataset
lowercase__ : str = load_dataset('nielsr/rvlcdip-demo' )
lowercase__ : Tuple = dataset['train'][0]['image'].convert('RGB' )
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : List[str] = model(**a )
lowercase__ : List[Any] = outputs.logits
lowercase__ : Union[str, Any] = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , a )
lowercase__ : Tuple = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : list[list[int]] = []
lowercase__ : list[int] = []
lowercase__ : Tuple = 0
lowercase__ : int = sum(_lowerCAmelCase )
create_state_space_tree(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return result
def a_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : list[int] , _lowerCAmelCase : list[list[int]] , _lowerCAmelCase : int , ):
'''simple docstring'''
if sum(_lowerCAmelCase ) > max_sum or (remaining_nums_sum + sum(_lowerCAmelCase )) < max_sum:
return
if sum(_lowerCAmelCase ) == max_sum:
result.append(_lowerCAmelCase )
return
for index in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
create_state_space_tree(
_lowerCAmelCase , _lowerCAmelCase , index + 1 , [*path, nums[index]] , _lowerCAmelCase , remaining_nums_sum - nums[index] , )
_UpperCamelCase : Any = [3, 34, 4, 12, 5, 2]
_UpperCamelCase : List[str] = 9
_UpperCamelCase : Dict = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 645
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase_ :
@staticmethod
def _UpperCAmelCase ( *a , **a ) -> int:
pass
def a_ ( _lowerCAmelCase : Image ):
'''simple docstring'''
lowercase__ : List[str] = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a )
import datasets
lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowercase__ : List[Any] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = 'Intel/dpt-large'
lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a )
lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowercase__ : Optional[Any] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 645
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase : str = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = "▁"
_UpperCamelCase : List[str] = {"vocab_file": "sentencepiece.bpe.model"}
_UpperCamelCase : int = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
_UpperCamelCase : List[str] = {
"xlm-roberta-base": 5_12,
"xlm-roberta-large": 5_12,
"xlm-roberta-large-finetuned-conll02-dutch": 5_12,
"xlm-roberta-large-finetuned-conll02-spanish": 5_12,
"xlm-roberta-large-finetuned-conll03-english": 5_12,
"xlm-roberta-large-finetuned-conll03-german": 5_12,
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Tuple = VOCAB_FILES_NAMES
lowerCamelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Any = ["input_ids", "attention_mask"]
def __init__( self , a , a="<s>" , a="</s>" , a="</s>" , a="<s>" , a="<unk>" , a="<pad>" , a="<mask>" , a = None , **a , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
lowercase__ : Optional[int] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
lowercase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
lowercase__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a ) )
lowercase__ : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase__ : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase__ : Optional[int] = 1
lowercase__ : List[str] = len(self.sp_model ) + self.fairseq_offset
lowercase__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
lowercase__ : List[str] = self.__dict__.copy()
lowercase__ : int = None
lowercase__ : List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , a ) -> List[Any]:
lowercase__ : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase__ : Tuple = {}
lowercase__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _UpperCAmelCase ( self , a , a = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ : Optional[int] = [self.cls_token_id]
lowercase__ : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _UpperCAmelCase ( self , a , a = None , a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is None:
return [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a )) + [1]
def _UpperCAmelCase ( self , a , a = None ) -> List[int]:
lowercase__ : List[Any] = [self.sep_token_id]
lowercase__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[str] = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCAmelCase ( self , a ) -> List[str]:
return self.sp_model.encode(a , out_type=a )
def _UpperCAmelCase ( self , a ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ : Union[str, Any] = self.sp_model.PieceToId(a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _UpperCAmelCase ( self , a ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _UpperCAmelCase ( self , a ) -> str:
lowercase__ : List[Any] = ''.join(a ).replace(a , ' ' ).strip()
return out_string
def _UpperCAmelCase ( self , a , a = None ) -> Tuple[str]:
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : Optional[Any] = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , 'wb' ) as fi:
lowercase__ : Any = self.sp_model.serialized_model_proto()
fi.write(a )
return (out_vocab_file,)
| 645
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase_ ( _a):
def __init__( self ) -> Any:
lowercase__ : Tuple = []
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_init_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]:
self.events.append('on_train_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_train_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_epoch_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[Any]:
self.events.append('on_epoch_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_step_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> str:
self.events.append('on_step_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_evaluate' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple:
self.events.append('on_predict' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Union[str, Any]:
self.events.append('on_save' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_log' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_prediction_step' )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> str:
lowercase__ : str = tempfile.mkdtemp()
def _UpperCAmelCase ( self ) -> Dict:
shutil.rmtree(self.output_dir )
def _UpperCAmelCase ( self , a=0 , a=0 , a=6_4 , a=6_4 , a=None , a=False , **a ) -> int:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
lowercase__ : str = RegressionDataset(length=a )
lowercase__ : Any = RegressionDataset(length=a )
lowercase__ : Optional[Any] = RegressionModelConfig(a=a , b=a )
lowercase__ : Union[str, Any] = RegressionPreTrainedModel(a )
lowercase__ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=a , report_to=[] , **a )
return Trainer(
a , a , train_dataset=a , eval_dataset=a , callbacks=a , )
def _UpperCAmelCase ( self , a , a ) -> Union[str, Any]:
self.assertEqual(len(a ) , len(a ) )
# Order doesn't matter
lowercase__ : Optional[int] = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
lowercase__ : Tuple = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
for cba, cba in zip(a , a ):
if isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(a , a )
elif isinstance(a , a ) and not isinstance(a , a ):
self.assertEqual(a , cba.__class__ )
elif not isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(cba.__class__ , a )
else:
self.assertEqual(a , a )
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
lowercase__ : Dict = ['on_init_end', 'on_train_begin']
lowercase__ : List[Any] = 0
lowercase__ : Optional[int] = len(trainer.get_eval_dataloader() )
lowercase__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(a ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : int = self.get_trainer()
lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# Callbacks passed at init are added to the default callbacks
lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a )
lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ : List[str] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Optional[Any] = self.get_trainer()
lowercase__ : List[Any] = trainer.pop_callback(a )
self.assertEqual(cb.__class__ , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# We can also add, pop, or remove by instance
lowercase__ : int = self.get_trainer()
lowercase__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Tuple = self.get_trainer()
lowercase__ : Dict = trainer.callback_handler.callbacks[0]
lowercase__ : Union[str, Any] = trainer.pop_callback(a )
self.assertEqual(a , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Tuple:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=a )
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# Independent log/save/eval
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
lowercase__ : str = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(a ) in warn_mock.call_args[0][0]
| 645
| 1
|
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
# General docstring
_UpperCamelCase : Optional[int] = "ResNetConfig"
# Base docstring
_UpperCamelCase : List[str] = "microsoft/resnet-50"
_UpperCamelCase : Any = [1, 20_48, 7, 7]
# Image classification docstring
_UpperCamelCase : str = "microsoft/resnet-50"
_UpperCamelCase : Any = "tiger cat"
_UpperCamelCase : Any = [
"microsoft/resnet-50",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a , a = 3 , a = 1 , a = "relu" ) -> Tuple:
super().__init__()
lowercase__ : List[Any] = nn.Convad(
a , a , kernel_size=a , stride=a , padding=kernel_size // 2 , bias=a )
lowercase__ : List[str] = nn.BatchNormad(a )
lowercase__ : str = ACTaFN[activation] if activation is not None else nn.Identity()
def _UpperCAmelCase ( self , a ) -> Tensor:
lowercase__ : Dict = self.convolution(a )
lowercase__ : Any = self.normalization(a )
lowercase__ : Any = self.activation(a )
return hidden_state
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a ) -> Tuple:
super().__init__()
lowercase__ : List[Any] = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
lowercase__ : Union[str, Any] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
lowercase__ : Any = config.num_channels
def _UpperCAmelCase ( self , a ) -> Tensor:
lowercase__ : Optional[Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
lowercase__ : Dict = self.embedder(a )
lowercase__ : Dict = self.pooler(a )
return embedding
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a , a = 2 ) -> List[str]:
super().__init__()
lowercase__ : Union[str, Any] = nn.Convad(a , a , kernel_size=1 , stride=a , bias=a )
lowercase__ : Union[str, Any] = nn.BatchNormad(a )
def _UpperCAmelCase ( self , a ) -> Tensor:
lowercase__ : Union[str, Any] = self.convolution(a )
lowercase__ : Optional[Any] = self.normalization(a )
return hidden_state
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a , a = 1 , a = "relu" ) -> Dict:
super().__init__()
lowercase__ : Optional[Any] = in_channels != out_channels or stride != 1
lowercase__ : Union[str, Any] = (
ResNetShortCut(a , a , stride=a ) if should_apply_shortcut else nn.Identity()
)
lowercase__ : List[Any] = nn.Sequential(
ResNetConvLayer(a , a , stride=a ) , ResNetConvLayer(a , a , activation=a ) , )
lowercase__ : Dict = ACTaFN[activation]
def _UpperCAmelCase ( self , a ) -> List[Any]:
lowercase__ : List[str] = hidden_state
lowercase__ : int = self.layer(a )
lowercase__ : Tuple = self.shortcut(a )
hidden_state += residual
lowercase__ : Tuple = self.activation(a )
return hidden_state
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a , a = 1 , a = "relu" , a = 4 ) -> Any:
super().__init__()
lowercase__ : List[Any] = in_channels != out_channels or stride != 1
lowercase__ : int = out_channels // reduction
lowercase__ : Tuple = (
ResNetShortCut(a , a , stride=a ) if should_apply_shortcut else nn.Identity()
)
lowercase__ : Any = nn.Sequential(
ResNetConvLayer(a , a , kernel_size=1 ) , ResNetConvLayer(a , a , stride=a ) , ResNetConvLayer(a , a , kernel_size=1 , activation=a ) , )
lowercase__ : str = ACTaFN[activation]
def _UpperCAmelCase ( self , a ) -> Any:
lowercase__ : Optional[int] = hidden_state
lowercase__ : str = self.layer(a )
lowercase__ : List[Any] = self.shortcut(a )
hidden_state += residual
lowercase__ : List[Any] = self.activation(a )
return hidden_state
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a , a , a = 2 , a = 2 , ) -> Union[str, Any]:
super().__init__()
lowercase__ : Dict = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
lowercase__ : List[Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(a , a , stride=a , activation=config.hidden_act ) , *[layer(a , a , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def _UpperCAmelCase ( self , a ) -> Tensor:
lowercase__ : str = input
for layer in self.layers:
lowercase__ : Tuple = layer(a )
return hidden_state
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a ) -> Optional[Any]:
super().__init__()
lowercase__ : Tuple = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
a , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase__ : Optional[int] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(a , config.depths[1:] ):
self.stages.append(ResNetStage(a , a , a , depth=a ) )
def _UpperCAmelCase ( self , a , a = False , a = True ) -> BaseModelOutputWithNoAttention:
lowercase__ : Union[str, Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__ : int = hidden_states + (hidden_state,)
lowercase__ : Optional[int] = stage_module(a )
if output_hidden_states:
lowercase__ : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=a , hidden_states=a , )
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Tuple = ResNetConfig
lowerCamelCase__ : Union[str, Any] = "resnet"
lowerCamelCase__ : int = "pixel_values"
lowerCamelCase__ : Any = True
def _UpperCAmelCase ( self , a ) -> Union[str, Any]:
if isinstance(a , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(a , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def _UpperCAmelCase ( self , a , a=False ) -> int:
if isinstance(a , a ):
lowercase__ : Any = value
_UpperCamelCase : str = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_UpperCamelCase : Tuple = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top." , _a , )
class UpperCAmelCase_ ( _a):
def __init__( self , a ) -> List[Any]:
super().__init__(a )
lowercase__ : str = config
lowercase__ : Dict = ResNetEmbeddings(a )
lowercase__ : Dict = ResNetEncoder(a )
lowercase__ : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _UpperCAmelCase ( self , a , a = None , a = None ) -> BaseModelOutputWithPoolingAndNoAttention:
lowercase__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Tuple = self.embedder(a )
lowercase__ : int = self.encoder(
a , output_hidden_states=a , return_dict=a )
lowercase__ : Optional[int] = encoder_outputs[0]
lowercase__ : Optional[Any] = self.pooler(a )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a , pooler_output=a , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , _a , )
class UpperCAmelCase_ ( _a):
def __init__( self , a ) -> List[Any]:
super().__init__(a )
lowercase__ : Any = config.num_labels
lowercase__ : Dict = ResNetModel(a )
# classification head
lowercase__ : Optional[Any] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _UpperCAmelCase ( self , a = None , a = None , a = None , a = None , ) -> ImageClassifierOutputWithNoAttention:
lowercase__ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Dict = self.resnet(a , output_hidden_states=a , return_dict=a )
lowercase__ : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
lowercase__ : Tuple = self.classifier(a )
lowercase__ : List[str] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase__ : List[Any] = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase__ : Union[str, Any] = 'single_label_classification'
else:
lowercase__ : Optional[int] = 'multi_label_classification'
if self.config.problem_type == "regression":
lowercase__ : Any = MSELoss()
if self.num_labels == 1:
lowercase__ : Any = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase__ : Dict = loss_fct(a , a )
elif self.config.problem_type == "single_label_classification":
lowercase__ : List[str] = CrossEntropyLoss()
lowercase__ : List[str] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase__ : Optional[int] = BCEWithLogitsLoss()
lowercase__ : List[str] = loss_fct(a , a )
if not return_dict:
lowercase__ : List[Any] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=a , logits=a , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " , _a , )
class UpperCAmelCase_ ( _a , _a):
def __init__( self , a ) -> Any:
super().__init__(a )
super()._init_backbone(a )
lowercase__ : List[str] = [config.embedding_size] + config.hidden_sizes
lowercase__ : Tuple = ResNetEmbeddings(a )
lowercase__ : Optional[Any] = ResNetEncoder(a )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a )
@replace_return_docstrings(output_type=a , config_class=_CONFIG_FOR_DOC )
def _UpperCAmelCase ( self , a , a = None , a = None ) -> BackboneOutput:
lowercase__ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : List[str] = self.embedder(a )
lowercase__ : List[str] = self.encoder(a , output_hidden_states=a , return_dict=a )
lowercase__ : Union[str, Any] = outputs.hidden_states
lowercase__ : Dict = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
lowercase__ : List[Any] = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=a , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=a , )
| 645
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCamelCase : str = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 645
| 1
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_UpperCamelCase : Any = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : List[Any] = "upernet"
def __init__( self , a=None , a=5_1_2 , a=0.02 , a=[1, 2, 3, 6] , a=True , a=0.4 , a=3_8_4 , a=2_5_6 , a=1 , a=False , a=2_5_5 , **a , ) -> List[Any]:
super().__init__(**a )
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowercase__ : Tuple = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
elif isinstance(a , a ):
lowercase__ : Any = backbone_config.get('model_type' )
lowercase__ : Any = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(a )
lowercase__ : Optional[Any] = backbone_config
lowercase__ : Any = hidden_size
lowercase__ : Dict = initializer_range
lowercase__ : Tuple = pool_scales
lowercase__ : List[Any] = use_auxiliary_head
lowercase__ : Any = auxiliary_loss_weight
lowercase__ : List[Any] = auxiliary_in_channels
lowercase__ : Any = auxiliary_channels
lowercase__ : str = auxiliary_num_convs
lowercase__ : Tuple = auxiliary_concat_input
lowercase__ : List[Any] = loss_ignore_index
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = copy.deepcopy(self.__dict__ )
lowercase__ : Union[str, Any] = self.backbone_config.to_dict()
lowercase__ : Optional[Any] = self.__class__.model_type
return output
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self , a ) -> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
lowercase__ : str = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = 'sshleifer/tiny-gpt2'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : List[Any] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : Tuple = TensorFlowBenchmark(a , [config] )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : List[str] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : List[str] = TensorFlowBenchmark(a , [config] )
lowercase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[int] = AutoConfig.from_pretrained(a )
lowercase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a , [config] )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random'
lowercase__ : Any = AutoConfig.from_pretrained(a )
lowercase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : int = TensorFlowBenchmark(a , configs=[config] )
lowercase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , )
lowercase__ : Any = TensorFlowBenchmark(a )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , )
lowercase__ : Union[str, Any] = TensorFlowBenchmark(a )
benchmark.run()
self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(a ):
self.assertTrue(hasattr(a , 'sequential' ) )
self.assertTrue(hasattr(a , 'cumulative' ) )
self.assertTrue(hasattr(a , 'current' ) )
self.assertTrue(hasattr(a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , )
lowercase__ : Optional[int] = TensorFlowBenchmark(a )
lowercase__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
| 645
| 1
|
"""simple docstring"""
import numpy as np
_UpperCamelCase : Optional[Any] = [
["a", "b", "c", "d", "e"],
["f", "g", "h", "i", "k"],
["l", "m", "n", "o", "p"],
["q", "r", "s", "t", "u"],
["v", "w", "x", "y", "z"],
]
class UpperCAmelCase_ :
def __init__( self ) -> None:
lowercase__ : Optional[int] = np.array(a )
def _UpperCAmelCase ( self , a ) -> np.ndarray:
lowercase__ , lowercase__ : List[str] = np.where(letter == self.SQUARE )
lowercase__ : Tuple = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def _UpperCAmelCase ( self , a , a ) -> str:
lowercase__ : List[str] = self.SQUARE[indexa - 1, indexa - 1]
return letter
def _UpperCAmelCase ( self , a ) -> str:
lowercase__ : Dict = message.lower()
lowercase__ : List[Any] = message.replace(' ' , '' )
lowercase__ : Optional[Any] = message.replace('j' , 'i' )
lowercase__ : str = np.empty((2, len(a )) )
for letter_index in range(len(a ) ):
lowercase__ : Optional[int] = self.letter_to_numbers(message[letter_index] )
lowercase__ : str = numbers[0]
lowercase__ : Union[str, Any] = numbers[1]
lowercase__ : int = first_step.reshape(2 * len(a ) )
lowercase__ : Any = ''
for numbers_index in range(len(a ) ):
lowercase__ : List[str] = int(second_step[numbers_index * 2] )
lowercase__ : Any = int(second_step[(numbers_index * 2) + 1] )
lowercase__ : Any = self.numbers_to_letter(a , a )
lowercase__ : Union[str, Any] = encoded_message + letter
return encoded_message
def _UpperCAmelCase ( self , a ) -> str:
lowercase__ : Tuple = message.lower()
message.replace(' ' , '' )
lowercase__ : Any = np.empty(2 * len(a ) )
for letter_index in range(len(a ) ):
lowercase__ : List[Any] = self.letter_to_numbers(message[letter_index] )
lowercase__ : List[str] = numbers[0]
lowercase__ : int = numbers[1]
lowercase__ : Any = first_step.reshape((2, len(a )) )
lowercase__ : Optional[Any] = ''
for numbers_index in range(len(a ) ):
lowercase__ : int = int(second_step[0, numbers_index] )
lowercase__ : List[str] = int(second_step[1, numbers_index] )
lowercase__ : Tuple = self.numbers_to_letter(a , a )
lowercase__ : Any = decoded_message + letter
return decoded_message
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : List[str] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : str = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = DistilBertModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(config=a )
lowercase__ : int = self._prepare_for_class(a , a )
lowercase__ : Tuple = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) )
lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ : Optional[Any] = model(a , attention_mask=a )[0]
lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a )
lowercase__ : List[Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
from math import isqrt
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
return all(number % divisor != 0 for divisor in range(2 , isqrt(_lowerCAmelCase ) + 1 ) )
def a_ ( _lowerCAmelCase : int = 10**6 ):
'''simple docstring'''
lowercase__ : List[Any] = 0
lowercase__ : Any = 1
lowercase__ : Union[str, Any] = 7
while prime_candidate < max_prime:
primes_count += is_prime(_lowerCAmelCase )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
| 645
|
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
| 1
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCamelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
for attribute in key.split('.' ):
lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
lowercase__ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowercase__ : Dict = value
elif weight_type == "weight_v":
lowercase__ : List[str] = value
elif weight_type == "bias":
lowercase__ : Optional[Any] = value
else:
lowercase__ : List[str] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = []
lowercase__ : List[str] = fairseq_model.state_dict()
lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ : int = True
if "*" in mapped_key:
lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' )[-2]
lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
lowercase__ : List[Any] = 'weight_g'
elif "weight_v" in name:
lowercase__ : int = 'weight_v'
elif "bias" in name:
lowercase__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = 'weight'
else:
lowercase__ : int = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = full_name.split('conv_layers.' )[-1]
lowercase__ : int = name.split('.' )
lowercase__ : int = int(items[0] )
lowercase__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase )
else:
lowercase__ : Any = UniSpeechSatConfig()
lowercase__ : Union[str, Any] = ''
if is_finetuned:
lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase )
else:
lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCamelCase : str = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 645
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any:
lowercase__ : List[str] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[int] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : str = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Any = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Tuple = n_targets
lowercase__ : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ : int = []
for i in range(self.batch_size ):
lowercase__ : Optional[Any] = {}
lowercase__ : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=a )
lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a )
labels.append(a )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _UpperCAmelCase ( self , a , a , a ) -> int:
lowercase__ : List[str] = YolosModel(config=a )
model.to(a )
model.eval()
lowercase__ : List[Any] = model(a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = YolosForObjectDetection(a )
model.to(a )
model.eval()
lowercase__ : Dict = model(pixel_values=a )
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ : str = model(pixel_values=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__ : List[str] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Union[str, Any] = False
def _UpperCAmelCase ( self , a , a , a=False ) -> Dict:
lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase__ : Optional[Any] = []
for i in range(self.model_tester.batch_size ):
lowercase__ : Dict = {}
lowercase__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=a , dtype=torch.long )
lowercase__ : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=a , dtype=torch.float )
labels.append(a )
lowercase__ : Union[str, Any] = labels
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = YolosModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
lowercase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = True
# in YOLOS, the seq_len is different
lowercase__ : Tuple = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : str = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : Optional[int] = True
lowercase__ : List[Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[str] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : Dict = len(a )
# Check attention is always last and order is fine
lowercase__ : Any = True
lowercase__ : int = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowercase__ : Tuple = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(a , a , a ):
lowercase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(a , a ) )
lowercase__ : int = outputs.hidden_states
lowercase__ : Any = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(a ) , a )
# YOLOS has a different seq_length
lowercase__ : Optional[int] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[Any] = True
check_hidden_states_output(a , a , a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*a )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = YolosModel.from_pretrained(a )
self.assertIsNotNone(a )
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a )
lowercase__ : Tuple = self.default_image_processor
lowercase__ : Optional[int] = prepare_img()
lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : int = model(inputs.pixel_values )
# verify outputs
lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , )
lowercase__ : List[str] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) )
# verify postprocessing
lowercase__ : Optional[Any] = image_processor.post_process_object_detection(
a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a )
lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7]
lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , a )
self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
| 645
| 1
|
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = 9
lowercase__ : str = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
lowercase__ : Any = kruskal(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : Optional[int] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(_lowerCAmelCase ) == sorted(_lowerCAmelCase )
| 645
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_UpperCamelCase : int = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
def __init__( self , a=False , a=False , a=6.0 , a=None , a=False , a=False , a=None , a="fp4" , a=False , **a , ) -> Tuple:
lowercase__ : str = load_in_abit
lowercase__ : str = load_in_abit
lowercase__ : List[str] = llm_inta_threshold
lowercase__ : Dict = llm_inta_skip_modules
lowercase__ : Tuple = llm_inta_enable_fpaa_cpu_offload
lowercase__ : Any = llm_inta_has_fpaa_weight
lowercase__ : Any = bnb_abit_quant_type
lowercase__ : Dict = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
lowercase__ : Dict = torch.floataa
elif isinstance(a , a ):
lowercase__ : Any = getattr(a , a )
elif isinstance(a , torch.dtype ):
lowercase__ : Any = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def _UpperCAmelCase ( self ) -> str:
if not isinstance(self.llm_inta_threshold , a ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , a ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , a ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , a ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , a ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , a ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def _UpperCAmelCase ( self ) -> Tuple:
return self.load_in_abit or self.load_in_abit
def _UpperCAmelCase ( self ) -> List[str]:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _UpperCAmelCase ( cls , a , a , **a ) -> Optional[Any]:
lowercase__ : List[Any] = cls(**a )
lowercase__ : Union[str, Any] = []
for key, value in kwargs.items():
if hasattr(a , a ):
setattr(a , a , a )
to_remove.append(a )
for key in to_remove:
kwargs.pop(a , a )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _UpperCAmelCase ( self , a ) -> Dict:
with open(a , 'w' , encoding='utf-8' ) as writer:
lowercase__ : Any = self.to_dict()
lowercase__ : str = json.dumps(a , indent=2 , sort_keys=a ) + '\n'
writer.write(a )
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self ) -> Dict:
return f"""{self.__class__.__name__} {self.to_json_string()}"""
def _UpperCAmelCase ( self , a = True ) -> str:
if use_diff is True:
lowercase__ : List[Any] = self.to_diff_dict()
else:
lowercase__ : List[str] = self.to_dict()
return json.dumps(a , indent=2 , sort_keys=a ) + "\n"
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Tuple = self.to_dict()
# get the default config dict
lowercase__ : Optional[Any] = BitsAndBytesConfig().to_dict()
lowercase__ : int = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
lowercase__ : Optional[int] = value
return serializable_config_dict
| 645
| 1
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : int = 10**9 ):
'''simple docstring'''
lowercase__ : Union[str, Any] = 1
lowercase__ : Union[str, Any] = 2
lowercase__ : Tuple = 0
lowercase__ : Union[str, Any] = 0
lowercase__ : List[Any] = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
lowercase__ : Tuple = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
| 645
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : int = 16
_UpperCamelCase : Union[str, Any] = 32
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
return int(x / 2**20 )
class UpperCAmelCase_ :
def __enter__( self ) -> Union[str, Any]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase__ : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self , *a ) -> Any:
gc.collect()
torch.cuda.empty_cache()
lowercase__ : Optional[Any] = torch.cuda.memory_allocated()
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
lowercase__ : List[Any] = bamb(self.end - self.begin )
lowercase__ : List[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ):
'''simple docstring'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
lowercase__ : Union[str, Any] = load_dataset(
'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} )
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Union[str, Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCAmelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : Optional[int] = config['lr']
lowercase__ : Optional[Any] = int(config['num_epochs'] )
lowercase__ : Optional[Any] = int(config['seed'] )
lowercase__ : int = int(config['batch_size'] )
lowercase__ : Union[str, Any] = args.model_name_or_path
set_seed(_lowerCAmelCase )
lowercase__ , lowercase__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase )
# Instantiate optimizer
lowercase__ : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowercase__ : List[Any] = 1
lowercase__ : List[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , )
else:
lowercase__ : Tuple = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase__ : Optional[int] = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ : Tuple = 0
# Now we train the model
lowercase__ : Optional[Any] = {}
for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
lowercase__ : List[Any] = model(**_lowerCAmelCase )
lowercase__ : Dict = outputs.loss
lowercase__ : int = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowercase__ : int = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , )
parser.add_argument(
'--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=_lowerCAmelCase , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=_lowerCAmelCase , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=_lowerCAmelCase , default=1 , help='Number of train epochs.' , )
lowercase__ : Any = parser.parse_args()
lowercase__ : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 645
| 1
|
"""simple docstring"""
import math
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Optional[Any] = []
lowercase__ : str = 2
lowercase__ : Dict = int(math.sqrt(_lowerCAmelCase ) ) # Size of every segment
lowercase__ : List[Any] = [True] * (end + 1)
lowercase__ : Any = []
while start <= end:
if temp[start] is True:
in_prime.append(_lowerCAmelCase )
for i in range(start * start , end + 1 , _lowerCAmelCase ):
lowercase__ : Any = False
start += 1
prime += in_prime
lowercase__ : Union[str, Any] = end + 1
lowercase__ : Any = min(2 * end , _lowerCAmelCase )
while low <= n:
lowercase__ : Any = [True] * (high - low + 1)
for each in in_prime:
lowercase__ : List[str] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(_lowerCAmelCase , high + 1 , _lowerCAmelCase ):
lowercase__ : Tuple = False
for j in range(len(_lowerCAmelCase ) ):
if temp[j] is True:
prime.append(j + low )
lowercase__ : Any = high + 1
lowercase__ : int = min(high + end , _lowerCAmelCase )
return prime
print(sieve(10**6))
| 645
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Any = [0] * len(_lowerCAmelCase )
for i in range(1 , len(_lowerCAmelCase ) ):
# use last results for better performance - dynamic programming
lowercase__ : List[str] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowercase__ : Dict = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowercase__ : Union[str, Any] = j
return prefix_result
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
return max(prefix_function(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
| 1
|
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
_UpperCamelCase : Any = {
"n_samples": 64,
"horizon": 32,
"num_inference_steps": 20,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
_UpperCamelCase : Dict = "hopper-medium-v2"
_UpperCamelCase : int = gym.make(env_name)
_UpperCamelCase : Optional[Any] = ValueGuidedRLPipeline.from_pretrained(
"bglick13/hopper-medium-v2-value-function-hor32",
env=env,
)
env.seed(0)
_UpperCamelCase : Optional[Any] = env.reset()
_UpperCamelCase : Optional[int] = 0
_UpperCamelCase : List[str] = 0
_UpperCamelCase : List[Any] = 10_00
_UpperCamelCase : int = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
_UpperCamelCase : int = pipeline(obs, planning_horizon=32)
# execute action in environment
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = env.step(denorm_actions)
_UpperCamelCase : List[Any] = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
f''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
_UpperCamelCase : Union[str, Any] = next_observation
except KeyboardInterrupt:
pass
print(f'''Total reward: {total_reward}''')
| 645
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]:
lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0}
lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
lowercase__ : Optional[int] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : str = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = min_resolution
lowercase__ : int = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : str = do_center_crop
lowercase__ : List[Any] = crop_size
lowercase__ : Union[str, Any] = do_flip_channel_order
def _UpperCAmelCase ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Tuple = MobileViTImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_center_crop' ) )
self.assertTrue(hasattr(a , 'center_crop' ) )
self.assertTrue(hasattr(a , 'do_flip_channel_order' ) )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processing
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 645
| 1
|
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 645
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict:
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : int = is_training
lowercase__ : str = use_attention_mask
lowercase__ : Dict = use_token_type_ids
lowercase__ : Optional[int] = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : int = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Optional[int] = num_choices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_attention_mask:
lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[str] = None
if self.use_token_type_ids:
lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Any = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_class_name in self.all_model_classes:
lowercase__ : str = model_class_name.from_pretrained('albert-base-v2' )
lowercase__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' )
lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ : Any = model(a , attention_mask=a )[0]
lowercase__ : Tuple = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , a )
lowercase__ : Optional[Any] = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class UpperCAmelCase_ ( _a):
def __init__( self , a , a = None , a = None , a = True , a = None , a = False , a = None , a = True , a = "arrow" , **a , ) -> Optional[Any]:
super().__init__(
split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , **a , )
lowercase__ : Tuple = load_from_cache_file
lowercase__ : Optional[Any] = file_format
lowercase__ : List[Any] = Spark(
df=a , features=a , cache_dir=a , working_dir=a , **a , )
def _UpperCAmelCase ( self ) -> Any:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowercase__ : List[Any] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=a , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 645
|
"""simple docstring"""
from collections.abc import Sequence
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
lowercase__ : int = 0.0
for coeff in reversed(_lowerCAmelCase ):
lowercase__ : List[Any] = result * x + coeff
return result
if __name__ == "__main__":
_UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0)
_UpperCamelCase : Dict = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 645
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Tuple = ["image_processor", "tokenizer"]
lowerCamelCase__ : Optional[int] = "AutoImageProcessor"
lowerCamelCase__ : Dict = "AutoTokenizer"
def __init__( self , a , a ) -> Any:
super().__init__(a , a )
lowercase__ : Optional[Any] = self.image_processor
def __call__( self , a=None , a=None , a=None , **a ) -> Tuple:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
lowercase__ : List[str] = self.tokenizer(a , return_tensors=a , **a )
if images is not None:
lowercase__ : Optional[int] = self.image_processor(a , return_tensors=a , **a )
if text is not None and images is not None:
lowercase__ : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def _UpperCAmelCase ( self , *a , **a ) -> Optional[Any]:
return self.tokenizer.batch_decode(*a , **a )
def _UpperCAmelCase ( self , *a , **a ) -> Optional[int]:
return self.tokenizer.decode(*a , **a )
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
return ["input_ids", "attention_mask", "pixel_values"]
| 645
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
_UpperCamelCase : Any = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def a_ ( _lowerCAmelCase : Optional[Any]=True ):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a))
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[Any] = None
def _UpperCAmelCase ( self , a , a ) -> List[Any]:
with TemporaryDirectory() as tmp_dir:
lowercase__ : List[str] = dataset_module_factory(a , cache_dir=a )
lowercase__ : List[Any] = import_main_class(dataset_module.module_path , dataset=a )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
lowercase__ : Union[str, Any] = cached_path(a , cache_dir=a )
self.assertTrue(os.path.exists(a ) )
@pytest.mark.integration
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
lowercase__ : int = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : Optional[int] = import_main_class(dataset_module.module_path )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
lowercase__ : Optional[int] = None
builder_instance.download_and_prepare()
lowercase__ : Optional[int] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert "train" in ds
assert isinstance(ds['train'] , _lowerCAmelCase )
assert next(iter(ds['train'] ) )
| 645
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase : List[Any] = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 645
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a_ ( _lowerCAmelCase : dict ):
'''simple docstring'''
return (data["data"], data["target"])
def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ):
'''simple docstring'''
lowercase__ : Any = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(_lowerCAmelCase , _lowerCAmelCase )
# Predict target for test data
lowercase__ : str = xgb.predict(_lowerCAmelCase )
lowercase__ : Union[str, Any] = predictions.reshape(len(_lowerCAmelCase ) , 1 )
return predictions
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[Any] = fetch_california_housing()
lowercase__ , lowercase__ : str = data_handling(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = train_test_split(
_lowerCAmelCase , _lowerCAmelCase , test_size=0.2_5 , random_state=1 )
lowercase__ : Any = xgboost(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
print(f"""Mean Square Error : {mean_squared_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 645
| 1
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=2_4 , a=2 , a=6 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=None , a=1_0_0_0 , ) -> Dict:
lowercase__ : Optional[int] = parent
lowercase__ : List[str] = batch_size
lowercase__ : Optional[int] = seq_length
lowercase__ : List[str] = is_training
lowercase__ : Optional[int] = use_input_mask
lowercase__ : List[Any] = use_token_type_ids
lowercase__ : Any = use_labels
lowercase__ : Union[str, Any] = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Tuple = num_hidden_layers
lowercase__ : Dict = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Dict = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : int = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[str] = initializer_range
lowercase__ : Dict = num_labels
lowercase__ : Any = scope
lowercase__ : Dict = range_bbox
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase__ : List[Any] = bbox[i, j, 3]
lowercase__ : str = bbox[i, j, 1]
lowercase__ : Union[str, Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase__ : Optional[int] = bbox[i, j, 2]
lowercase__ : Dict = bbox[i, j, 0]
lowercase__ : Tuple = t
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowercase__ : int = None
if self.use_token_type_ids:
lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Optional[int] = None
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : str = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def _UpperCAmelCase ( self ) -> Optional[Any]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a , a , ) -> Dict:
lowercase__ : Union[str, Any] = LiltModel(config=a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , bbox=a , attention_mask=a , token_type_ids=a )
lowercase__ : Optional[int] = model(a , bbox=a , token_type_ids=a )
lowercase__ : List[str] = model(a , bbox=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a , a , ) -> Tuple:
lowercase__ : Union[str, Any] = self.num_labels
lowercase__ : List[str] = LiltForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(
a , bbox=a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a , a , ) -> Optional[int]:
lowercase__ : List[str] = LiltForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(
a , bbox=a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) : Union[str, Any] = config_and_inputs
lowercase__ : Dict = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : str = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Dict = False
def _UpperCAmelCase ( self , a , a , a , a , a ) -> str:
return True
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[str] = LiltModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__ : List[str] = type
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = LiltModel.from_pretrained(a )
self.assertIsNotNone(a )
@require_torch
@slow
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(a )
lowercase__ : Any = torch.tensor([[1, 2]] , device=a )
lowercase__ : Union[str, Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a )
# forward pass
with torch.no_grad():
lowercase__ : Tuple = model(input_ids=a , bbox=a )
lowercase__ : str = torch.Size([1, 2, 7_6_8] )
lowercase__ : str = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=a , )
self.assertTrue(outputs.last_hidden_state.shape , a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a , atol=1e-3 ) )
| 645
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]:
lowercase__ : str = parent
lowercase__ : int = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Dict = patch_size
lowercase__ : Tuple = tubelet_size
lowercase__ : Optional[int] = num_frames
lowercase__ : Optional[int] = is_training
lowercase__ : int = use_labels
lowercase__ : Optional[int] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = mask_ratio
lowercase__ : Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase__ : Optional[Any] = (image_size // patch_size) ** 2
lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase__ : str = int(mask_ratio * self.seq_length )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Dict = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Tuple:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : Dict = VideoMAEModel(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = VideoMAEForPreTraining(a )
model.to(a )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Any = torch.ones((self.num_masks,) )
lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool()
lowercase__ : str = model(a , a )
# model only returns predictions for masked patches
lowercase__ : str = mask.sum().item()
lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCamelCase__ : Optional[int] = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : str = False
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = VideoMAEModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]:
lowercase__ : Union[str, Any] = copy.deepcopy(a )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) )
lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowercase__ : Union[str, Any] = bool_masked_pos.to(a )
if return_labels:
if model_class in [
*get_values(a ),
]:
lowercase__ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Dict:
pass
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
if not self.has_attentions:
pass
else:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = True
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase__ : Optional[Any] = True
lowercase__ : int = False
lowercase__ : Any = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Dict = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : List[str] = len(a )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : List[str] = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
lowercase__ : int = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> Optional[int]:
def check_hidden_states_output(a , a , a ):
lowercase__ : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a ) , a )
lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Tuple = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(a , a , a )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
def a_ ( ):
'''simple docstring'''
lowercase__ : int = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
lowercase__ : str = np.load(_lowerCAmelCase )
return list(_lowerCAmelCase )
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
a )
lowercase__ : str = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**a )
# verify the logits
lowercase__ : str = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a )
# add boolean mask, indicating which patches to mask
lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
lowercase__ : str = torch.load(a )
# forward pass
with torch.no_grad():
lowercase__ : List[Any] = model(**a )
# verify the logits
lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowercase__ : List[str] = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a )
self.assertEqual(outputs.logits.shape , a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to(
a )
with torch.no_grad():
lowercase__ : Any = model(**a )
lowercase__ : List[Any] = torch.tensor(torch.tensor([0.6_469] ) , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
'''simple docstring'''
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
lowercase__ : Optional[Any] = str(bin(_lowerCAmelCase ) )
binary_number += "0" * shift_amount
return binary_number
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
'''simple docstring'''
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
lowercase__ : Optional[Any] = str(bin(_lowerCAmelCase ) )[2:]
if shift_amount >= len(_lowerCAmelCase ):
return "0b0"
lowercase__ : int = binary_number[: len(_lowerCAmelCase ) - shift_amount]
return "0b" + shifted_binary_number
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
'''simple docstring'''
if number >= 0: # Get binary representation of positive number
lowercase__ : Tuple = '0' + str(bin(_lowerCAmelCase ) ).strip('-' )[2:]
else: # Get binary (2's complement) representation of negative number
lowercase__ : Tuple = len(bin(_lowerCAmelCase )[3:] ) # Find 2's complement of number
lowercase__ : str = bin(abs(_lowerCAmelCase ) - (1 << binary_number_length) )[3:]
lowercase__ : Union[str, Any] = (
'1' + '0' * (binary_number_length - len(_lowerCAmelCase )) + binary_number
)
if shift_amount >= len(_lowerCAmelCase ):
return "0b" + binary_number[0] * len(_lowerCAmelCase )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(_lowerCAmelCase ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCamelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
for attribute in key.split('.' ):
lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
lowercase__ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowercase__ : Dict = value
elif weight_type == "weight_v":
lowercase__ : List[str] = value
elif weight_type == "bias":
lowercase__ : Optional[Any] = value
else:
lowercase__ : List[str] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = []
lowercase__ : List[str] = fairseq_model.state_dict()
lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ : int = True
if "*" in mapped_key:
lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' )[-2]
lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
lowercase__ : List[Any] = 'weight_g'
elif "weight_v" in name:
lowercase__ : int = 'weight_v'
elif "bias" in name:
lowercase__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = 'weight'
else:
lowercase__ : int = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = full_name.split('conv_layers.' )[-1]
lowercase__ : int = name.split('.' )
lowercase__ : int = int(items[0] )
lowercase__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase )
else:
lowercase__ : Any = UniSpeechSatConfig()
lowercase__ : Union[str, Any] = ''
if is_finetuned:
lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase )
else:
lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCamelCase : str = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 645
| 1
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def a_ ( _lowerCAmelCase : Namespace ):
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_UpperCamelCase : List[Any] = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class UpperCAmelCase_ ( _a):
@staticmethod
def _UpperCAmelCase ( a ) -> Tuple:
lowercase__ : Dict = parser.add_parser(
'convert' , help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' , )
train_parser.add_argument('--model_type' , type=a , required=a , help='Model\'s type.' )
train_parser.add_argument(
'--tf_checkpoint' , type=a , required=a , help='TensorFlow checkpoint path or folder.' )
train_parser.add_argument(
'--pytorch_dump_output' , type=a , required=a , help='Path to the PyTorch saved model output.' )
train_parser.add_argument('--config' , type=a , default='' , help='Configuration file path or folder.' )
train_parser.add_argument(
'--finetuning_task_name' , type=a , default=a , help='Optional fine-tuning task name if the TF model was a finetuned model.' , )
train_parser.set_defaults(func=a )
def __init__( self , a , a , a , a , a , *a , ) -> Union[str, Any]:
lowercase__ : List[str] = logging.get_logger('transformers-cli/converting' )
self._logger.info(f"""Loading model {model_type}""" )
lowercase__ : int = model_type
lowercase__ : Dict = tf_checkpoint
lowercase__ : Union[str, Any] = pytorch_dump_output
lowercase__ : int = config
lowercase__ : str = finetuning_task_name
def _UpperCAmelCase ( self ) -> Dict:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a )
if "ckpt" in self._tf_checkpoint.lower():
lowercase__ : Optional[int] = self._tf_checkpoint
lowercase__ : int = ''
else:
lowercase__ : Optional[int] = self._tf_checkpoint
lowercase__ : List[str] = ''
convert_transfo_xl_checkpoint_to_pytorch(
a , self._config , self._pytorch_dump_output , a )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]' )
| 645
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int:
lowercase__ : int = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Dict = image_size
lowercase__ : str = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = embed_dim
lowercase__ : Any = depths
lowercase__ : Dict = num_heads
lowercase__ : List[str] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Tuple = qkv_bias
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = drop_path_rate
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[Any] = use_absolute_embeddings
lowercase__ : Optional[Any] = patch_norm
lowercase__ : Any = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : List[str] = is_training
lowercase__ : int = scope
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : List[str] = encoder_stride
lowercase__ : Optional[Any] = out_features
lowercase__ : Dict = out_indices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Tuple = MaskFormerSwinModel(config=a )
model.to(a )
model.eval()
lowercase__ : str = model(a )
lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(a ):
lowercase__ : Dict = ['stem']
lowercase__ : List[str] = MaskFormerSwinBackbone(config=a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase__ : str = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = MaskFormerSwinModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> str:
return
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a )
@unittest.skip('Swin does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
lowercase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple:
lowercase__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[Any] = outputs.hidden_states
lowercase__ : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swin has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = 3
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : int = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(a ):
lowercase__ : Union[str, Any] = 0
return t
def check_equivalence(a , a , a , a={} ):
with torch.no_grad():
lowercase__ : Optional[Any] = model(**a , return_dict=a , **a )
lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple()
def recursive_check(a , a ):
if isinstance(a , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a , a ):
recursive_check(a , a )
elif isinstance(a , a ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(a , a )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has"""
f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}."""
) , )
recursive_check(a , a )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
model.to(a )
model.eval()
lowercase__ : Tuple = self._prepare_for_class(a , a )
lowercase__ : Optional[Any] = self._prepare_for_class(a , a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a )
lowercase__ : int = self._prepare_for_class(a , a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _a):
lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[int] = MaskFormerSwinModelTester(self )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
lowercase__ : Optional[Any] = backbone_class(a )
backbone.to(a )
backbone.eval()
lowercase__ : Union[str, Any] = backbone(**a )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , a )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowercase__ : List[str] = backbone(**a , output_hidden_states=a )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowercase__ , lowercase__ , lowercase__ : int = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowercase__ : List[Any] = backbone(**a , output_attentions=a )
self.assertIsNotNone(outputs.attentions )
| 645
| 1
|
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Tuple = StableDiffusionPipeline.from_pretrained(_lowerCAmelCase , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
lowercase__ : Optional[Any] = load_file(_lowerCAmelCase )
lowercase__ : Optional[Any] = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowercase__ : Any = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' )
lowercase__ : Dict = pipeline.text_encoder
else:
lowercase__ : List[str] = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' )
lowercase__ : Optional[Any] = pipeline.unet
# find the target layer
lowercase__ : int = layer_infos.pop(0 )
while len(_lowerCAmelCase ) > -1:
try:
lowercase__ : List[Any] = curr_layer.__getattr__(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
lowercase__ : Optional[int] = layer_infos.pop(0 )
elif len(_lowerCAmelCase ) == 0:
break
except Exception:
if len(_lowerCAmelCase ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
lowercase__ : Optional[int] = layer_infos.pop(0 )
lowercase__ : Tuple = []
if "lora_down" in key:
pair_keys.append(key.replace('lora_down' , 'lora_up' ) )
pair_keys.append(_lowerCAmelCase )
else:
pair_keys.append(_lowerCAmelCase )
pair_keys.append(key.replace('lora_up' , 'lora_down' ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
lowercase__ : Optional[int] = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
lowercase__ : Dict = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(_lowerCAmelCase , _lowerCAmelCase ).unsqueeze(2 ).unsqueeze(3 )
else:
lowercase__ : Union[str, Any] = state_dict[pair_keys[0]].to(torch.floataa )
lowercase__ : Union[str, Any] = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(_lowerCAmelCase , _lowerCAmelCase )
# update visited list
for item in pair_keys:
visited.append(_lowerCAmelCase )
return pipeline
if __name__ == "__main__":
_UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.7_5, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
_UpperCamelCase : Optional[Any] = parser.parse_args()
_UpperCamelCase : str = args.base_model_path
_UpperCamelCase : Tuple = args.checkpoint_path
_UpperCamelCase : str = args.dump_path
_UpperCamelCase : Tuple = args.lora_prefix_unet
_UpperCamelCase : Optional[int] = args.lora_prefix_text_encoder
_UpperCamelCase : List[Any] = args.alpha
_UpperCamelCase : Any = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
_UpperCamelCase : List[str] = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 645
|
"""simple docstring"""
import math
def a_ ( _lowerCAmelCase : int = 100 ):
'''simple docstring'''
lowercase__ : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) )
lowercase__ : str = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
| 645
| 1
|
"""simple docstring"""
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 645
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : List[Any] = controlnet_params
lowercase__ : int = 'bird'
lowercase__ : List[Any] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
lowercase__ : List[Any] = jax.random.PRNGKey(0 )
lowercase__ : Tuple = jax.random.split(a , jax.device_count() )
lowercase__ : str = replicate(a )
lowercase__ : List[str] = shard(a )
lowercase__ : Dict = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : Optional[Any] = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : Optional[Any] = controlnet_params
lowercase__ : List[Any] = 'Chef in the kitchen'
lowercase__ : List[str] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
lowercase__ : List[str] = jax.random.PRNGKey(0 )
lowercase__ : str = jax.random.split(a , jax.device_count() )
lowercase__ : Optional[Any] = replicate(a )
lowercase__ : Optional[Any] = shard(a )
lowercase__ : List[Any] = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : str = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 645
| 1
|
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
_UpperCamelCase : Tuple = logging.get_logger(__name__)
enable_full_determinism()
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : str = UNetaDModel
lowerCamelCase__ : int = "sample"
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Union[str, Any] = 4
lowercase__ : List[str] = 3
lowercase__ : Union[str, Any] = (3_2, 3_2)
lowercase__ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(a )
lowercase__ : List[str] = torch.tensor([1_0] ).to(a )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCAmelCase ( self ) -> List[str]:
return (3, 3_2, 3_2)
@property
def _UpperCAmelCase ( self ) -> List[Any]:
return (3, 3_2, 3_2)
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : int = {
'block_out_channels': (3_2, 6_4),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 3_2,
}
lowercase__ : str = self.dummy_input
return init_dict, inputs_dict
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = UNetaDModel
lowerCamelCase__ : int = "sample"
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : Optional[int] = 4
lowercase__ : Tuple = 4
lowercase__ : Tuple = (3_2, 3_2)
lowercase__ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ).to(a )
lowercase__ : Any = torch.tensor([1_0] ).to(a )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCAmelCase ( self ) -> Tuple:
return (4, 3_2, 3_2)
@property
def _UpperCAmelCase ( self ) -> List[Any]:
return (4, 3_2, 3_2)
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Tuple = {
'sample_size': 3_2,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (3_2, 6_4),
'attention_head_dim': 3_2,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
lowercase__ : str = self.dummy_input
return init_dict, inputs_dict
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : Optional[int] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=a )
self.assertIsNotNone(a )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(a )
lowercase__ : Dict = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ , lowercase__ : int = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=a )
model.to(a )
lowercase__ : Tuple = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
lowercase__ , lowercase__ : Optional[Any] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=a )
model_accelerate.to(a )
model_accelerate.eval()
lowercase__ : List[str] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase__ : Any = noise.to(a )
lowercase__ : Optional[Any] = torch.tensor([1_0] * noise.shape[0] ).to(a )
lowercase__ : int = model_accelerate(a , a )['sample']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
lowercase__ , lowercase__ : Any = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=a , low_cpu_mem_usage=a )
model_normal_load.to(a )
model_normal_load.eval()
lowercase__ : Tuple = model_normal_load(a , a )['sample']
assert torch_all_close(a , a , rtol=1e-3 )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : List[Any] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(a )
lowercase__ : Dict = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase__ : str = noise.to(a )
lowercase__ : Union[str, Any] = torch.tensor([1_0] * noise.shape[0] ).to(a )
with torch.no_grad():
lowercase__ : Any = model(a , a ).sample
lowercase__ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowercase__ : Union[str, Any] = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(a , a , rtol=1e-3 ) )
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Any = UNetaDModel
lowerCamelCase__ : str = "sample"
@property
def _UpperCAmelCase ( self , a=(3_2, 3_2) ) -> Tuple:
lowercase__ : List[Any] = 4
lowercase__ : Dict = 3
lowercase__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(a )
lowercase__ : Optional[int] = torch.tensor(batch_size * [1_0] ).to(dtype=torch.intaa , device=a )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
return (3, 3_2, 3_2)
@property
def _UpperCAmelCase ( self ) -> Tuple:
return (3, 3_2, 3_2)
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = {
'block_out_channels': [3_2, 6_4, 6_4, 6_4],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1e-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
lowercase__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
@slow
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : str = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=a )
self.assertIsNotNone(a )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(a )
lowercase__ : str = self.dummy_input
lowercase__ : Dict = floats_tensor((4, 3) + (2_5_6, 2_5_6) ).to(a )
lowercase__ : str = noise
lowercase__ : Optional[Any] = model(**a )
assert image is not None, "Make sure output is not None"
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(a )
lowercase__ : Optional[Any] = 4
lowercase__ : int = 3
lowercase__ : Optional[Any] = (2_5_6, 2_5_6)
lowercase__ : Dict = torch.ones((batch_size, num_channels) + sizes ).to(a )
lowercase__ : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(a )
with torch.no_grad():
lowercase__ : Tuple = model(a , a ).sample
lowercase__ : List[Any] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowercase__ : List[str] = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(a , a , rtol=1e-2 ) )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Any = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(a )
lowercase__ : Dict = 4
lowercase__ : List[str] = 3
lowercase__ : List[str] = (3_2, 3_2)
lowercase__ : Union[str, Any] = torch.ones((batch_size, num_channels) + sizes ).to(a )
lowercase__ : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(a )
with torch.no_grad():
lowercase__ : List[str] = model(a , a ).sample
lowercase__ : int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowercase__ : Dict = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(a , a , rtol=1e-2 ) )
def _UpperCAmelCase ( self ) -> Any:
# not required for this model
pass
| 645
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 645
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=2 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , a=0 , ) -> str:
lowercase__ : List[str] = parent
lowercase__ : List[str] = batch_size
lowercase__ : Optional[int] = seq_length
lowercase__ : Any = is_training
lowercase__ : Tuple = use_input_mask
lowercase__ : Optional[Any] = use_token_type_ids
lowercase__ : Union[str, Any] = use_labels
lowercase__ : List[str] = vocab_size
lowercase__ : Any = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[int] = intermediate_size
lowercase__ : Dict = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : str = max_position_embeddings
lowercase__ : Optional[int] = type_vocab_size
lowercase__ : Optional[int] = type_sequence_label_size
lowercase__ : Tuple = initializer_range
lowercase__ : List[Any] = num_labels
lowercase__ : List[str] = num_choices
lowercase__ : int = scope
lowercase__ : int = projection_dim
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : List[str] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowercase__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Optional[Any] = None
if self.use_token_type_ids:
lowercase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : List[Any] = None
lowercase__ : Optional[Any] = None
lowercase__ : List[str] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : int = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
lowercase__ : int = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> List[Any]:
lowercase__ : Optional[int] = TFDPRContextEncoder(config=a )
lowercase__ : Any = model(a , attention_mask=a , token_type_ids=a )
lowercase__ : Any = model(a , token_type_ids=a )
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> List[str]:
lowercase__ : List[Any] = TFDPRQuestionEncoder(config=a )
lowercase__ : List[Any] = model(a , attention_mask=a , token_type_ids=a )
lowercase__ : str = model(a , token_type_ids=a )
lowercase__ : Optional[Any] = model(a )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> Tuple:
lowercase__ : int = TFDPRReader(config=a )
lowercase__ : Union[str, Any] = model(a , attention_mask=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : int = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) : Any = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : str = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowerCamelCase__ : List[str] = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
lowerCamelCase__ : Any = False
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : int = False
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Tuple = TFDPRModelTester(self )
lowercase__ : str = ConfigTester(self , config_class=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> int:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*a )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*a )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = TFDPRContextEncoder.from_pretrained(a )
self.assertIsNotNone(a )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Tuple = TFDPRContextEncoder.from_pretrained(a )
self.assertIsNotNone(a )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = TFDPRQuestionEncoder.from_pretrained(a )
self.assertIsNotNone(a )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Tuple = TFDPRReader.from_pretrained(a )
self.assertIsNotNone(a )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
lowercase__ : List[Any] = tf.constant(
[[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
lowercase__ : Optional[int] = model(a )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowercase__ : Tuple = tf.constant(
[
[
0.03_236_253,
0.12_753_335,
0.16_818_509,
0.00_279_786,
0.3_896_933,
0.24_264_945,
0.2_178_971,
-0.02_335_227,
-0.08_481_959,
-0.14_324_117,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 645
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
lowercase__ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(a )
from datasets import load_dataset
lowercase__ : str = load_dataset('nielsr/rvlcdip-demo' )
lowercase__ : Tuple = dataset['train'][0]['image'].convert('RGB' )
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : List[str] = model(**a )
lowercase__ : List[Any] = outputs.logits
lowercase__ : Union[str, Any] = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , a )
lowercase__ : Tuple = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
_UpperCamelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase_ ( _a):
def __init__( self , a , a ) -> int:
super().__init__()
self.register_modules(unet=a , scheduler=a )
@torch.no_grad()
def __call__( self , a = 1 , a = 1_0_0 , a = None , a = None , a = True , ) -> Union[AudioPipelineOutput, Tuple]:
if audio_length_in_s is None:
lowercase__ : Dict = self.unet.config.sample_size / self.unet.config.sample_rate
lowercase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowercase__ : Any = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
lowercase__ : List[Any] = int(a )
if sample_size % down_scale_factor != 0:
lowercase__ : Tuple = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
' process.' )
lowercase__ : Union[str, Any] = int(a )
lowercase__ : str = next(iter(self.unet.parameters() ) ).dtype
lowercase__ : Optional[Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(a )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
lowercase__ : Tuple = randn_tensor(a , generator=a , device=self.device , dtype=a )
# set step values
self.scheduler.set_timesteps(a , device=audio.device )
lowercase__ : Union[str, Any] = self.scheduler.timesteps.to(a )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase__ : Dict = self.unet(a , a ).sample
# 2. compute previous image: x_t -> t_t-1
lowercase__ : Dict = self.scheduler.step(a , a , a ).prev_sample
lowercase__ : str = audio.clamp(-1 , 1 ).float().cpu().numpy()
lowercase__ : List[str] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=a )
| 645
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase_ :
@staticmethod
def _UpperCAmelCase ( *a , **a ) -> int:
pass
def a_ ( _lowerCAmelCase : Image ):
'''simple docstring'''
lowercase__ : List[str] = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a )
import datasets
lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowercase__ : List[Any] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = 'Intel/dpt-large'
lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a )
lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowercase__ : Optional[Any] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 645
| 1
|
"""simple docstring"""
class UpperCAmelCase_ :
def __init__( self , a = "" , a = False ) -> None:
# Mapping from the first character of the prefix of the node
lowercase__ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowercase__ : Tuple = is_leaf
lowercase__ : List[Any] = prefix
def _UpperCAmelCase ( self , a ) -> tuple[str, str, str]:
lowercase__ : Union[str, Any] = 0
for q, w in zip(self.prefix , a ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def _UpperCAmelCase ( self , a ) -> None:
for word in words:
self.insert(a )
def _UpperCAmelCase ( self , a ) -> None:
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
lowercase__ : Any = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowercase__ : Tuple = RadixNode(prefix=a , is_leaf=a )
else:
lowercase__ : List[str] = self.nodes[word[0]]
lowercase__ , lowercase__ , lowercase__ : Optional[int] = incoming_node.match(
a )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(a )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowercase__ : Optional[int] = remaining_prefix
lowercase__ : List[Any] = self.nodes[matching_string[0]]
lowercase__ : List[str] = RadixNode(a , a )
lowercase__ : Tuple = aux_node
if remaining_word == "":
lowercase__ : Optional[Any] = True
else:
self.nodes[matching_string[0]].insert(a )
def _UpperCAmelCase ( self , a ) -> bool:
lowercase__ : Union[str, Any] = self.nodes.get(word[0] , a )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ : Dict = incoming_node.match(
a )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(a )
def _UpperCAmelCase ( self , a ) -> bool:
lowercase__ : str = self.nodes.get(word[0] , a )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ : Optional[int] = incoming_node.match(
a )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(a )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowercase__ : Optional[Any] = list(self.nodes.values() )[0]
lowercase__ : int = merging_node.is_leaf
self.prefix += merging_node.prefix
lowercase__ : Optional[int] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowercase__ : Any = False
# If there is 1 edge, we merge it with its child
else:
lowercase__ : Any = list(incoming_node.nodes.values() )[0]
lowercase__ : Tuple = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowercase__ : Dict = merging_node.nodes
return True
def _UpperCAmelCase ( self , a = 0 ) -> None:
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[Any] = 'banana bananas bandana band apple all beast'.split()
lowercase__ : Optional[int] = RadixNode()
root.insert_many(_lowerCAmelCase )
assert all(root.find(_lowerCAmelCase ) for word in words )
assert not root.find('bandanas' )
assert not root.find('apps' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def a_ ( ):
'''simple docstring'''
assert test_trie()
def a_ ( ):
'''simple docstring'''
lowercase__ : Any = RadixNode()
lowercase__ : Union[str, Any] = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(_lowerCAmelCase )
print('Words:' , _lowerCAmelCase )
print('Tree:' )
root.print_tree()
if __name__ == "__main__":
main()
| 645
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase_ ( _a):
def __init__( self ) -> Any:
lowercase__ : Tuple = []
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_init_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]:
self.events.append('on_train_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_train_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_epoch_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[Any]:
self.events.append('on_epoch_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_step_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> str:
self.events.append('on_step_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_evaluate' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple:
self.events.append('on_predict' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Union[str, Any]:
self.events.append('on_save' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_log' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_prediction_step' )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> str:
lowercase__ : str = tempfile.mkdtemp()
def _UpperCAmelCase ( self ) -> Dict:
shutil.rmtree(self.output_dir )
def _UpperCAmelCase ( self , a=0 , a=0 , a=6_4 , a=6_4 , a=None , a=False , **a ) -> int:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
lowercase__ : str = RegressionDataset(length=a )
lowercase__ : Any = RegressionDataset(length=a )
lowercase__ : Optional[Any] = RegressionModelConfig(a=a , b=a )
lowercase__ : Union[str, Any] = RegressionPreTrainedModel(a )
lowercase__ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=a , report_to=[] , **a )
return Trainer(
a , a , train_dataset=a , eval_dataset=a , callbacks=a , )
def _UpperCAmelCase ( self , a , a ) -> Union[str, Any]:
self.assertEqual(len(a ) , len(a ) )
# Order doesn't matter
lowercase__ : Optional[int] = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
lowercase__ : Tuple = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
for cba, cba in zip(a , a ):
if isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(a , a )
elif isinstance(a , a ) and not isinstance(a , a ):
self.assertEqual(a , cba.__class__ )
elif not isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(cba.__class__ , a )
else:
self.assertEqual(a , a )
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
lowercase__ : Dict = ['on_init_end', 'on_train_begin']
lowercase__ : List[Any] = 0
lowercase__ : Optional[int] = len(trainer.get_eval_dataloader() )
lowercase__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(a ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : int = self.get_trainer()
lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# Callbacks passed at init are added to the default callbacks
lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a )
lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ : List[str] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Optional[Any] = self.get_trainer()
lowercase__ : List[Any] = trainer.pop_callback(a )
self.assertEqual(cb.__class__ , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# We can also add, pop, or remove by instance
lowercase__ : int = self.get_trainer()
lowercase__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Tuple = self.get_trainer()
lowercase__ : Dict = trainer.callback_handler.callbacks[0]
lowercase__ : Union[str, Any] = trainer.pop_callback(a )
self.assertEqual(a , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Tuple:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=a )
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# Independent log/save/eval
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
lowercase__ : str = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(a ) in warn_mock.call_args[0][0]
| 645
| 1
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : tuple[int, int] , _lowerCAmelCase : tuple[int, int] , _lowerCAmelCase : bool , ):
'''simple docstring'''
lowercase__ , lowercase__ : Optional[int] = grid.shape
lowercase__ : List[Any] = [-1, 1, 0, 0]
lowercase__ : str = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowercase__ , lowercase__ : List[str] = [(0, source)], set()
lowercase__ : Optional[int] = np.full((rows, cols) , np.inf )
lowercase__ : Union[str, Any] = 0
lowercase__ : List[str] = np.empty((rows, cols) , dtype=_lowerCAmelCase )
lowercase__ : Any = None
while queue:
((lowercase__) , (lowercase__)) : Union[str, Any] = heappop(_lowerCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowercase__ : str = []
while (x, y) != source:
path.append((x, y) )
lowercase__ , lowercase__ : Union[str, Any] = predecessors[x, y]
path.append(_lowerCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(_lowerCAmelCase ) ):
lowercase__ , lowercase__ : Any = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowercase__ : Any = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(_lowerCAmelCase , (dist + 1, (nx, ny)) )
lowercase__ : List[str] = dist + 1
lowercase__ : Optional[int] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCamelCase : str = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 645
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_a)
class UpperCAmelCase_ ( _a):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowerCamelCase__ : str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True})
lowerCamelCase__ : ClassVar[Features] = Features({"text": Value("string")})
lowerCamelCase__ : ClassVar[Features] = Features({"summary": Value("string")})
lowerCamelCase__ : str = "text"
lowerCamelCase__ : str = "summary"
@property
def _UpperCAmelCase ( self ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self , a ) -> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
lowercase__ : str = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = 'sshleifer/tiny-gpt2'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : List[Any] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : Tuple = TensorFlowBenchmark(a , [config] )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : List[str] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : List[str] = TensorFlowBenchmark(a , [config] )
lowercase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[int] = AutoConfig.from_pretrained(a )
lowercase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a , [config] )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random'
lowercase__ : Any = AutoConfig.from_pretrained(a )
lowercase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : int = TensorFlowBenchmark(a , configs=[config] )
lowercase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , )
lowercase__ : Any = TensorFlowBenchmark(a )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , )
lowercase__ : Union[str, Any] = TensorFlowBenchmark(a )
benchmark.run()
self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(a ):
self.assertTrue(hasattr(a , 'sequential' ) )
self.assertTrue(hasattr(a , 'cumulative' ) )
self.assertTrue(hasattr(a , 'current' ) )
self.assertTrue(hasattr(a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , )
lowercase__ : Optional[int] = TensorFlowBenchmark(a )
lowercase__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
| 645
| 1
|
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_UpperCamelCase : Tuple = logging.get_logger(__name__)
_UpperCamelCase : Tuple = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
_UpperCamelCase : str = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
_UpperCamelCase : int = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
_UpperCamelCase : List[Any] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
_UpperCamelCase : Optional[int] = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
_UpperCamelCase : List[str] = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
_UpperCamelCase : List[Any] = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
_UpperCamelCase : Optional[Any] = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
_UpperCamelCase : Optional[int] = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
_UpperCamelCase : Tuple = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
_UpperCamelCase : str = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
_UpperCamelCase : Optional[int] = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
_UpperCamelCase : Optional[int] = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
_UpperCamelCase : List[Any] = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
_UpperCamelCase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_UpperCamelCase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_UpperCamelCase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_UpperCamelCase : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_UpperCamelCase : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_UpperCamelCase : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_UpperCamelCase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_UpperCamelCase : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_UpperCamelCase : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_UpperCamelCase : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_UpperCamelCase : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_UpperCamelCase : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_UpperCamelCase : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_UpperCamelCase : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class UpperCAmelCase_ ( _BaseAutoModelClass):
lowerCamelCase__ : Union[str, Any] = FLAX_MODEL_MAPPING
_UpperCamelCase : Optional[Any] = auto_class_update(FlaxAutoModel)
class UpperCAmelCase_ ( _BaseAutoModelClass):
lowerCamelCase__ : Optional[int] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_UpperCamelCase : Tuple = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class UpperCAmelCase_ ( _BaseAutoModelClass):
lowerCamelCase__ : str = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_UpperCamelCase : Union[str, Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class UpperCAmelCase_ ( _BaseAutoModelClass):
lowerCamelCase__ : Any = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_UpperCamelCase : str = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class UpperCAmelCase_ ( _BaseAutoModelClass):
lowerCamelCase__ : Optional[int] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCamelCase : Tuple = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class UpperCAmelCase_ ( _BaseAutoModelClass):
lowerCamelCase__ : Union[str, Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_UpperCamelCase : Optional[Any] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class UpperCAmelCase_ ( _BaseAutoModelClass):
lowerCamelCase__ : str = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_UpperCamelCase : Union[str, Any] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class UpperCAmelCase_ ( _BaseAutoModelClass):
lowerCamelCase__ : Tuple = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_UpperCamelCase : Optional[Any] = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class UpperCAmelCase_ ( _BaseAutoModelClass):
lowerCamelCase__ : Any = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_UpperCamelCase : List[Any] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class UpperCAmelCase_ ( _BaseAutoModelClass):
lowerCamelCase__ : Optional[int] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_UpperCamelCase : List[Any] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class UpperCAmelCase_ ( _BaseAutoModelClass):
lowerCamelCase__ : Optional[int] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_UpperCamelCase : int = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class UpperCAmelCase_ ( _BaseAutoModelClass):
lowerCamelCase__ : Union[str, Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_UpperCamelCase : Union[str, Any] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class UpperCAmelCase_ ( _BaseAutoModelClass):
lowerCamelCase__ : List[str] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_UpperCamelCase : Optional[int] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : List[str] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : str = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = DistilBertModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(config=a )
lowercase__ : int = self._prepare_for_class(a , a )
lowercase__ : Tuple = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) )
lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ : Optional[Any] = model(a , attention_mask=a )[0]
lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a )
lowercase__ : List[Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : List[Any] = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def _UpperCAmelCase ( self , a=0 ) -> Optional[Any]:
lowercase__ : Optional[int] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(a ) )
lowercase__ : List[Any] = np.random.RandomState(a )
lowercase__ : int = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=a )
lowercase__ : int = self.get_dummy_inputs()
lowercase__ : Optional[Any] = pipe(**a ).images
lowercase__ : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : Union[str, Any] = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ : Optional[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a )
pipe.set_progress_bar_config(disable=a )
lowercase__ : List[Any] = self.get_dummy_inputs()
lowercase__ : str = pipe(**a ).images
lowercase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : Union[str, Any] = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
# warmup pass to apply optimizations
lowercase__ : str = pipe(**self.get_dummy_inputs() )
lowercase__ : Optional[int] = self.get_dummy_inputs()
lowercase__ : int = pipe(**a ).images
lowercase__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : str = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ : Optional[int] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
lowercase__ : List[str] = self.get_dummy_inputs()
lowercase__ : Tuple = pipe(**a ).images
lowercase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : List[Any] = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ : Optional[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
lowercase__ : str = self.get_dummy_inputs()
lowercase__ : int = pipe(**a ).images
lowercase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : Optional[int] = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
lowercase__ : Optional[Any] = self.get_dummy_inputs()
lowercase__ : Tuple = pipe(**a ).images
lowercase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : List[Any] = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
@property
def _UpperCAmelCase ( self ) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Tuple = ort.SessionOptions()
lowercase__ : List[Any] = False
return options
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowercase__ : Optional[int] = init_image.resize((7_6_8, 5_1_2) )
# using the PNDM scheduler by default
lowercase__ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
lowercase__ : int = 'A fantasy landscape, trending on artstation'
lowercase__ : int = np.random.RandomState(0 )
lowercase__ : str = pipe(
prompt=a , image=a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=a , output_type='np' , )
lowercase__ : List[str] = output.images
lowercase__ : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
lowercase__ : Optional[Any] = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowercase__ : Optional[int] = init_image.resize((7_6_8, 5_1_2) )
lowercase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
lowercase__ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
lowercase__ : str = 'A fantasy landscape, trending on artstation'
lowercase__ : Any = np.random.RandomState(0 )
lowercase__ : Optional[int] = pipe(
prompt=a , image=a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=a , output_type='np' , )
lowercase__ : str = output.images
lowercase__ : Tuple = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
lowercase__ : List[str] = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 645
|
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.